Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-09 12:49:23 +00:00
Rename the coins folder to networks (#583)
* Rename the coins folder to networks

  Ethereum isn't a coin. It's a network.

  Resolves #357.

* More renames of coins -> networks in orchestration
* Correct paths in tests/
* cargo fmt
networks/monero/ringct/borromean/Cargo.toml (Normal file, 41 lines)
@@ -0,0 +1,41 @@
[package]
name = "monero-borromean"
version = "0.1.0"
description = "Borromean ring signatures arranged into a range proof, as done by the Monero protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/ringct/borromean"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.79"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false }

zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

# Cryptographic dependencies
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }

# Other Monero dependencies
monero-io = { path = "../../io", version = "0.1", default-features = false }
monero-generators = { path = "../../generators", version = "0.4", default-features = false }
monero-primitives = { path = "../../primitives", version = "0.1", default-features = false }

[features]
std = [
  "std-shims/std",

  "zeroize/std",

  "monero-io/std",
  "monero-generators/std",
  "monero-primitives/std",
]
default = ["std"]
networks/monero/ringct/borromean/LICENSE (Normal file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022-2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
networks/monero/ringct/borromean/README.md (Normal file, 12 lines)
@@ -0,0 +1,12 @@
# Monero Borromean

Borromean ring signatures arranged into a range proof, as done by the Monero
protocol.

This library is usable under no-std when the `std` feature (on by default) is
disabled.

### Cargo Features

- `std` (on by default): Enables `std` (and with it, more efficient internal
  implementations).
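A minimal usage sketch for the API this crate's src/lib.rs (next file) exposes. This is not part of the commit; the proof bytes and commitment are assumed to come from a parsed transaction, and the `std` feature is assumed for io.

```rust
use curve25519_dalek::EdwardsPoint;
use monero_borromean::BorromeanRange;

// Parse a serialized Borromean range proof and check it proves the commitment
// is to a 64-bit amount.
fn check_borromean_range(proof_bytes: &[u8], commitment: &EdwardsPoint) -> bool {
  let Ok(proof) = BorromeanRange::read(&mut &proof_bytes[..]) else { return false };
  proof.verify(commitment)
}
```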
networks/monero/ringct/borromean/src/lib.rs (Normal file, 112 lines)
@@ -0,0 +1,112 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(non_snake_case)]

use core::fmt::Debug;
use std_shims::io::{self, Read, Write};

use zeroize::Zeroize;

use curve25519_dalek::{traits::Identity, Scalar, EdwardsPoint};

use monero_io::*;
use monero_generators::H_pow_2;
use monero_primitives::{keccak256_to_scalar, UnreducedScalar};

// 64 Borromean ring signatures, as needed for a 64-bit range proof.
//
// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they were reduced.
// `UnreducedScalar` preserves their original byte encoding and implements a custom reduction
// algorithm which was in use.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
struct BorromeanSignatures {
  s0: [UnreducedScalar; 64],
  s1: [UnreducedScalar; 64],
  ee: Scalar,
}

impl BorromeanSignatures {
  // Read a set of BorromeanSignatures.
  fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
    Ok(BorromeanSignatures {
      s0: read_array(UnreducedScalar::read, r)?,
      s1: read_array(UnreducedScalar::read, r)?,
      ee: read_scalar(r)?,
    })
  }

  // Write the set of BorromeanSignatures.
  fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for s0 in &self.s0 {
      s0.write(w)?;
    }
    for s1 in &self.s1 {
      s1.write(w)?;
    }
    write_scalar(&self.ee, w)
  }

  fn verify(&self, keys_a: &[EdwardsPoint], keys_b: &[EdwardsPoint]) -> bool {
    let mut transcript = [0; 2048];

    for i in 0 .. 64 {
      #[allow(non_snake_case)]
      let LL = EdwardsPoint::vartime_double_scalar_mul_basepoint(
        &self.ee,
        &keys_a[i],
        &self.s0[i].recover_monero_slide_scalar(),
      );
      #[allow(non_snake_case)]
      let LV = EdwardsPoint::vartime_double_scalar_mul_basepoint(
        &keccak256_to_scalar(LL.compress().as_bytes()),
        &keys_b[i],
        &self.s1[i].recover_monero_slide_scalar(),
      );
      transcript[(i * 32) .. ((i + 1) * 32)].copy_from_slice(LV.compress().as_bytes());
    }

    keccak256_to_scalar(transcript) == self.ee
  }
}

/// A range proof premised on Borromean ring signatures.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct BorromeanRange {
  sigs: BorromeanSignatures,
  bit_commitments: [EdwardsPoint; 64],
}

impl BorromeanRange {
  /// Read a BorromeanRange proof.
  pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanRange> {
    Ok(BorromeanRange {
      sigs: BorromeanSignatures::read(r)?,
      bit_commitments: read_array(read_point, r)?,
    })
  }

  /// Write the BorromeanRange proof.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.sigs.write(w)?;
    write_raw_vec(write_point, &self.bit_commitments, w)
  }

  /// Verify the commitment contains a 64-bit value.
  #[must_use]
  pub fn verify(&self, commitment: &EdwardsPoint) -> bool {
    if &self.bit_commitments.iter().sum::<EdwardsPoint>() != commitment {
      return false;
    }

    #[allow(non_snake_case)]
    let H_pow_2 = H_pow_2();
    let mut commitments_sub_one = [EdwardsPoint::identity(); 64];
    for i in 0 .. 64 {
      commitments_sub_one[i] = self.bit_commitments[i] - H_pow_2[i];
    }

    self.sigs.verify(&self.bit_commitments, &commitments_sub_one)
  }
}
networks/monero/ringct/bulletproofs/Cargo.toml (Normal file, 55 lines)
@@ -0,0 +1,55 @@
[package]
name = "monero-bulletproofs"
version = "0.1.0"
description = "Bulletproofs(+) range proofs, as defined by the Monero protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/ringct/bulletproofs"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.79"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false }

thiserror = { version = "1", default-features = false, optional = true }

rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

# Cryptographic dependencies
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }

# Other Monero dependencies
monero-io = { path = "../../io", version = "0.1", default-features = false }
monero-generators = { path = "../../generators", version = "0.4", default-features = false }
monero-primitives = { path = "../../primitives", version = "0.1", default-features = false }

[build-dependencies]
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }
monero-generators = { path = "../../generators", version = "0.4", default-features = false }

[dev-dependencies]
hex-literal = "0.4"

[features]
std = [
  "std-shims/std",

  "thiserror",

  "rand_core/std",
  "zeroize/std",

  "monero-io/std",
  "monero-generators/std",
  "monero-primitives/std",
]
compile-time-generators = ["curve25519-dalek/precomputed-tables"]
default = ["std", "compile-time-generators"]
networks/monero/ringct/bulletproofs/LICENSE (Normal file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022-2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
networks/monero/ringct/bulletproofs/README.md (Normal file, 14 lines)
@@ -0,0 +1,14 @@
# Monero Bulletproofs(+)

Bulletproofs(+) range proofs, as defined by the Monero protocol.

This library is usable under no-std when the `std` feature (on by default) is
disabled.

### Cargo Features

- `std` (on by default): Enables `std` (and with it, more efficient internal
  implementations).
- `compile-time-generators` (on by default): Derives the generators at
  compile-time so they don't need to be derived at runtime. This is recommended
  if program size doesn't need to be kept minimal.
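A hedged usage sketch for the crate-level API added in src/lib.rs below (not part of the commit's files; the RNG and output commitments are supplied by the caller, and `Commitment::calculate` comes from monero-primitives as used in that file).

```rust
use rand_core::{RngCore, CryptoRng};
use monero_primitives::Commitment;
use monero_bulletproofs::Bulletproof;

// Prove an aggregate Bulletproof+ over the outputs' commitments, then verify it
// against the commitment points a verifier would independently hold.
fn prove_and_verify<R: RngCore + CryptoRng>(rng: &mut R, outputs: Vec<Commitment>) -> bool {
  let commitments = outputs.iter().map(Commitment::calculate).collect::<Vec<_>>();
  let Ok(proof) = Bulletproof::prove_plus(&mut *rng, outputs) else { return false };
  proof.verify(rng, &commitments)
}
```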
networks/monero/ringct/bulletproofs/build.rs (Normal file, 88 lines)
@@ -0,0 +1,88 @@
use std::{
  io::Write,
  env,
  path::Path,
  fs::{File, remove_file},
};

#[cfg(feature = "compile-time-generators")]
fn generators(prefix: &'static str, path: &str) {
  use curve25519_dalek::EdwardsPoint;

  use monero_generators::bulletproofs_generators;

  fn serialize(generators_string: &mut String, points: &[EdwardsPoint]) {
    for generator in points {
      generators_string.extend(
        format!(
          "
            curve25519_dalek::edwards::CompressedEdwardsY({:?}).decompress().unwrap(),
          ",
          generator.compress().to_bytes()
        )
        .chars(),
      );
    }
  }

  let generators = bulletproofs_generators(prefix.as_bytes());
  #[allow(non_snake_case)]
  let mut G_str = String::new();
  serialize(&mut G_str, &generators.G);
  #[allow(non_snake_case)]
  let mut H_str = String::new();
  serialize(&mut H_str, &generators.H);

  let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path);
  let _ = remove_file(&path);
  File::create(&path)
    .unwrap()
    .write_all(
      format!(
        "
          static GENERATORS_CELL: OnceLock<Generators> = OnceLock::new();
          pub(crate) fn GENERATORS() -> &'static Generators {{
            GENERATORS_CELL.get_or_init(|| Generators {{
              G: std_shims::vec![
                {G_str}
              ],
              H: std_shims::vec![
                {H_str}
              ],
            }})
          }}
        ",
      )
      .as_bytes(),
    )
    .unwrap();
}

#[cfg(not(feature = "compile-time-generators"))]
fn generators(prefix: &'static str, path: &str) {
  let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path);
  let _ = remove_file(&path);
  File::create(&path)
    .unwrap()
    .write_all(
      format!(
        r#"
          static GENERATORS_CELL: OnceLock<Generators> = OnceLock::new();
          pub(crate) fn GENERATORS() -> &'static Generators {{
            GENERATORS_CELL.get_or_init(|| {{
              monero_generators::bulletproofs_generators(b"{prefix}")
            }})
          }}
        "#,
      )
      .as_bytes(),
    )
    .unwrap();
}

fn main() {
  println!("cargo:rerun-if-changed=build.rs");

  generators("bulletproof", "generators.rs");
  generators("bulletproof_plus", "generators_plus.rs");
}
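For context on how the build script's output is consumed: it writes a `GENERATORS()` accessor into `generators.rs` / `generators_plus.rs` under OUT_DIR, and the library pulls the generated file in verbatim at compile time, as src/original/mod.rs does later in this diff. A sketch of that one line:

```rust
// Include the build-script-generated GENERATORS() accessor from OUT_DIR.
include!(concat!(env!("OUT_DIR"), "/generators.rs"));
```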
networks/monero/ringct/bulletproofs/src/batch_verifier.rs (Normal file, 101 lines)
@@ -0,0 +1,101 @@
use std_shims::vec::Vec;

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT,
  traits::{IsIdentity, VartimeMultiscalarMul},
  scalar::Scalar,
  edwards::EdwardsPoint,
};

use monero_generators::{H, Generators};

use crate::{original, plus};

#[derive(Default)]
pub(crate) struct InternalBatchVerifier {
  pub(crate) g: Scalar,
  pub(crate) h: Scalar,
  pub(crate) g_bold: Vec<Scalar>,
  pub(crate) h_bold: Vec<Scalar>,
  pub(crate) other: Vec<(Scalar, EdwardsPoint)>,
}

impl InternalBatchVerifier {
  #[must_use]
  fn verify(self, G: EdwardsPoint, H: EdwardsPoint, generators: &Generators) -> bool {
    let capacity = 2 + self.g_bold.len() + self.h_bold.len() + self.other.len();
    let mut scalars = Vec::with_capacity(capacity);
    let mut points = Vec::with_capacity(capacity);

    scalars.push(self.g);
    points.push(G);

    scalars.push(self.h);
    points.push(H);

    for (i, g_bold) in self.g_bold.into_iter().enumerate() {
      scalars.push(g_bold);
      points.push(generators.G[i]);
    }

    for (i, h_bold) in self.h_bold.into_iter().enumerate() {
      scalars.push(h_bold);
      points.push(generators.H[i]);
    }

    for (scalar, point) in self.other {
      scalars.push(scalar);
      points.push(point);
    }

    EdwardsPoint::vartime_multiscalar_mul(scalars, points).is_identity()
  }
}

#[derive(Default)]
pub(crate) struct BulletproofsBatchVerifier(pub(crate) InternalBatchVerifier);
impl BulletproofsBatchVerifier {
  #[must_use]
  pub(crate) fn verify(self) -> bool {
    self.0.verify(ED25519_BASEPOINT_POINT, H(), original::GENERATORS())
  }
}

#[derive(Default)]
pub(crate) struct BulletproofsPlusBatchVerifier(pub(crate) InternalBatchVerifier);
impl BulletproofsPlusBatchVerifier {
  #[must_use]
  pub(crate) fn verify(self) -> bool {
    // Bulletproofs+ is written as per the paper, with G for the value and H for the mask
    // Monero uses H for the value and G for the mask
    self.0.verify(H(), ED25519_BASEPOINT_POINT, plus::GENERATORS())
  }
}

/// A batch verifier for Bulletproofs(+).
///
/// This uses a fixed layout such that all fixed points only incur a single point scaling,
/// regardless of the amount of proofs verified. All variable points (commitments) are
/// accumulated with the fixed points into a single multiscalar multiplication.
#[derive(Default)]
pub struct BatchVerifier {
  pub(crate) original: BulletproofsBatchVerifier,
  pub(crate) plus: BulletproofsPlusBatchVerifier,
}
impl BatchVerifier {
  /// Create a new batch verifier.
  pub fn new() -> Self {
    Self {
      original: BulletproofsBatchVerifier(InternalBatchVerifier::default()),
      plus: BulletproofsPlusBatchVerifier(InternalBatchVerifier::default()),
    }
  }

  /// Verify all of the proofs queued within this batch verifier.
  ///
  /// This uses a variable-time multiscalar multiplication internally.
  #[must_use]
  pub fn verify(self) -> bool {
    self.original.verify() && self.plus.verify()
  }
}
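A sketch of how the batching described above might be driven from downstream code (not part of the commit): `BatchVerifier` is re-exported from the crate root and `Bulletproof::batch_verify` is defined in src/lib.rs later in this diff. The `proofs` slice pairing each proof with its commitments is an assumption of this example.

```rust
use rand_core::{RngCore, CryptoRng};
use curve25519_dalek::edwards::EdwardsPoint;
use monero_bulletproofs::{Bulletproof, BatchVerifier};

// Queue many Bulletproofs(+) into one batch, then perform a single
// variable-time multiscalar multiplication to verify them all.
fn verify_all<R: RngCore + CryptoRng>(
  rng: &mut R,
  proofs: &[(Bulletproof, Vec<EdwardsPoint>)],
) -> bool {
  let mut verifier = BatchVerifier::new();
  for (proof, commitments) in proofs {
    // A false return means the proof wasn't sane and the batch is undefined.
    if !proof.batch_verify(&mut *rng, &mut verifier, commitments) {
      return false;
    }
  }
  verifier.verify()
}
```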
networks/monero/ringct/bulletproofs/src/core.rs (Normal file, 74 lines)
@@ -0,0 +1,74 @@
use std_shims::{vec, vec::Vec};

use curve25519_dalek::{
  traits::{MultiscalarMul, VartimeMultiscalarMul},
  scalar::Scalar,
  edwards::EdwardsPoint,
};

pub(crate) use monero_generators::{MAX_COMMITMENTS, COMMITMENT_BITS, LOG_COMMITMENT_BITS};

pub(crate) fn multiexp(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
  let mut buf_scalars = Vec::with_capacity(pairs.len());
  let mut buf_points = Vec::with_capacity(pairs.len());
  for (scalar, point) in pairs {
    buf_scalars.push(scalar);
    buf_points.push(point);
  }
  EdwardsPoint::multiscalar_mul(buf_scalars, buf_points)
}

pub(crate) fn multiexp_vartime(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
  let mut buf_scalars = Vec::with_capacity(pairs.len());
  let mut buf_points = Vec::with_capacity(pairs.len());
  for (scalar, point) in pairs {
    buf_scalars.push(scalar);
    buf_points.push(point);
  }
  EdwardsPoint::vartime_multiscalar_mul(buf_scalars, buf_points)
}

/*
  This has room for optimization worth investigating further. It currently takes
  an iterative approach. It can be optimized further via divide and conquer.

  Assume there are 4 challenges.

  Iterative approach (current):
    1. Do the optimal multiplications across challenge column 0 and 1.
    2. Do the optimal multiplications across that result and column 2.
    3. Do the optimal multiplications across that result and column 3.

  Divide and conquer (worth investigating further):
    1. Do the optimal multiplications across challenge column 0 and 1.
    2. Do the optimal multiplications across challenge column 2 and 3.
    3. Multiply both results together.

  When there are 4 challenges (n=16), the iterative approach does 28 multiplications
  versus divide and conquer's 24.
*/
pub(crate) fn challenge_products(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
  let mut products = vec![Scalar::ONE; 1 << challenges.len()];

  if !challenges.is_empty() {
    products[0] = challenges[0].1;
    products[1] = challenges[0].0;

    for (j, challenge) in challenges.iter().enumerate().skip(1) {
      let mut slots = (1 << (j + 1)) - 1;
      while slots > 0 {
        products[slots] = products[slots / 2] * challenge.0;
        products[slots - 1] = products[slots / 2] * challenge.1;

        slots = slots.saturating_sub(2);
      }
    }

    // Sanity check since if the above failed to populate, it'd be critical
    for product in &products {
      debug_assert!(*product != Scalar::ZERO);
    }
  }

  products
}
networks/monero/ringct/bulletproofs/src/lib.rs (Normal file, 292 lines)
@@ -0,0 +1,292 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(non_snake_case)]

use std_shims::{
  vec,
  vec::Vec,
  io::{self, Read, Write},
};

use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroizing;

use curve25519_dalek::edwards::EdwardsPoint;

use monero_io::*;
pub use monero_generators::MAX_COMMITMENTS;
use monero_primitives::Commitment;

pub(crate) mod scalar_vector;
pub(crate) mod point_vector;

pub(crate) mod core;
use crate::core::LOG_COMMITMENT_BITS;

pub(crate) mod batch_verifier;
use batch_verifier::{BulletproofsBatchVerifier, BulletproofsPlusBatchVerifier};
pub use batch_verifier::BatchVerifier;

pub(crate) mod original;
use crate::original::{
  IpProof, AggregateRangeStatement as OriginalStatement, AggregateRangeWitness as OriginalWitness,
  AggregateRangeProof as OriginalProof,
};

pub(crate) mod plus;
use crate::plus::{
  WipProof, AggregateRangeStatement as PlusStatement, AggregateRangeWitness as PlusWitness,
  AggregateRangeProof as PlusProof,
};

#[cfg(test)]
mod tests;

/// An error from proving/verifying Bulletproofs(+).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum BulletproofError {
  /// Proving/verifying a Bulletproof(+) range proof with no commitments.
  #[cfg_attr(feature = "std", error("no commitments to prove the range for"))]
  NoCommitments,
  /// Proving/verifying a Bulletproof(+) range proof with more commitments than supported.
  #[cfg_attr(feature = "std", error("too many commitments to prove the range for"))]
  TooManyCommitments,
}

/// A Bulletproof(+).
///
/// This encapsulates either a Bulletproof or a Bulletproof+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproof {
  /// A Bulletproof.
  Original(OriginalProof),
  /// A Bulletproof+.
  Plus(PlusProof),
}

impl Bulletproof {
  fn bp_fields(plus: bool) -> usize {
    if plus {
      6
    } else {
      9
    }
  }

  /// Calculate the weight penalty for the Bulletproof(+).
  ///
  /// Bulletproofs(+) are logarithmically sized yet linearly timed. Evaluating them by their size
  /// alone accordingly doesn't properly represent the burden of the proof. To compensate, Monero
  /// 'claws back' some of the weight lost by using a proof which is smaller than it is fast.
  // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
  //   src/cryptonote_basic/cryptonote_format_utils.cpp#L106-L124
  pub fn calculate_bp_clawback(plus: bool, n_outputs: usize) -> (usize, usize) {
    #[allow(non_snake_case)]
    let mut LR_len = 0;
    let mut n_padded_outputs = 1;
    while n_padded_outputs < n_outputs {
      LR_len += 1;
      n_padded_outputs = 1 << LR_len;
    }
    LR_len += LOG_COMMITMENT_BITS;

    let mut bp_clawback = 0;
    if n_padded_outputs > 2 {
      let fields = Bulletproof::bp_fields(plus);
      let base = ((fields + (2 * (LOG_COMMITMENT_BITS + 1))) * 32) / 2;
      let size = (fields + (2 * LR_len)) * 32;
      bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
    }

    (bp_clawback, LR_len)
  }

  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof.
  pub fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: Vec<Commitment>,
  ) -> Result<Bulletproof, BulletproofError> {
    if outputs.is_empty() {
      Err(BulletproofError::NoCommitments)?;
    }
    if outputs.len() > MAX_COMMITMENTS {
      Err(BulletproofError::TooManyCommitments)?;
    }
    let commitments = outputs.iter().map(Commitment::calculate).collect::<Vec<_>>();
    Ok(Bulletproof::Original(
      OriginalStatement::new(&commitments)
        .unwrap()
        .prove(rng, OriginalWitness::new(outputs).unwrap())
        .unwrap(),
    ))
  }

  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof+.
  pub fn prove_plus<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: Vec<Commitment>,
  ) -> Result<Bulletproof, BulletproofError> {
    if outputs.is_empty() {
      Err(BulletproofError::NoCommitments)?;
    }
    if outputs.len() > MAX_COMMITMENTS {
      Err(BulletproofError::TooManyCommitments)?;
    }
    let commitments = outputs.iter().map(Commitment::calculate).collect::<Vec<_>>();
    Ok(Bulletproof::Plus(
      PlusStatement::new(&commitments)
        .unwrap()
        .prove(rng, &Zeroizing::new(PlusWitness::new(outputs).unwrap()))
        .unwrap(),
    ))
  }

  /// Verify the given Bulletproof(+).
  #[must_use]
  pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
    match self {
      Bulletproof::Original(bp) => {
        let mut verifier = BulletproofsBatchVerifier::default();
        let Some(statement) = OriginalStatement::new(commitments) else {
          return false;
        };
        if !statement.verify(rng, &mut verifier, bp.clone()) {
          return false;
        }
        verifier.verify()
      }
      Bulletproof::Plus(bp) => {
        let mut verifier = BulletproofsPlusBatchVerifier::default();
        let Some(statement) = PlusStatement::new(commitments) else {
          return false;
        };
        if !statement.verify(rng, &mut verifier, bp.clone()) {
          return false;
        }
        verifier.verify()
      }
    }
  }

  /// Accumulate the verification for the given Bulletproof(+) into the specified BatchVerifier.
  ///
  /// Returns false if the Bulletproof(+) isn't sane, leaving the BatchVerifier in an undefined
  /// state.
  ///
  /// Returns true if the Bulletproof(+) is sane, regardless of its validity.
  ///
  /// The BatchVerifier must have its verification function executed to actually verify this proof.
  #[must_use]
  pub fn batch_verify<R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    verifier: &mut BatchVerifier,
    commitments: &[EdwardsPoint],
  ) -> bool {
    match self {
      Bulletproof::Original(bp) => {
        let Some(statement) = OriginalStatement::new(commitments) else {
          return false;
        };
        statement.verify(rng, &mut verifier.original, bp.clone())
      }
      Bulletproof::Plus(bp) => {
        let Some(statement) = PlusStatement::new(commitments) else {
          return false;
        };
        statement.verify(rng, &mut verifier.plus, bp.clone())
      }
    }
  }

  fn write_core<W: Write, F: Fn(&[EdwardsPoint], &mut W) -> io::Result<()>>(
    &self,
    w: &mut W,
    specific_write_vec: F,
  ) -> io::Result<()> {
    match self {
      Bulletproof::Original(bp) => {
        write_point(&bp.A, w)?;
        write_point(&bp.S, w)?;
        write_point(&bp.T1, w)?;
        write_point(&bp.T2, w)?;
        write_scalar(&bp.tau_x, w)?;
        write_scalar(&bp.mu, w)?;
        specific_write_vec(&bp.ip.L, w)?;
        specific_write_vec(&bp.ip.R, w)?;
        write_scalar(&bp.ip.a, w)?;
        write_scalar(&bp.ip.b, w)?;
        write_scalar(&bp.t_hat, w)
      }

      Bulletproof::Plus(bp) => {
        write_point(&bp.A, w)?;
        write_point(&bp.wip.A, w)?;
        write_point(&bp.wip.B, w)?;
        write_scalar(&bp.wip.r_answer, w)?;
        write_scalar(&bp.wip.s_answer, w)?;
        write_scalar(&bp.wip.delta_answer, w)?;
        specific_write_vec(&bp.wip.L, w)?;
        specific_write_vec(&bp.wip.R, w)
      }
    }
  }

  /// Write a Bulletproof(+) for the message signed by a transaction's signature.
  ///
  /// This has a distinct encoding from the standard encoding.
  pub fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
  }

  /// Write a Bulletproof(+).
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_vec(write_point, points, w))
  }

  /// Serialize a Bulletproof(+) to a `Vec<u8>`.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Read a Bulletproof.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    Ok(Bulletproof::Original(OriginalProof {
      A: read_point(r)?,
      S: read_point(r)?,
      T1: read_point(r)?,
      T2: read_point(r)?,
      tau_x: read_scalar(r)?,
      mu: read_scalar(r)?,
      ip: IpProof {
        L: read_vec(read_point, r)?,
        R: read_vec(read_point, r)?,
        a: read_scalar(r)?,
        b: read_scalar(r)?,
      },
      t_hat: read_scalar(r)?,
    }))
  }

  /// Read a Bulletproof+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    Ok(Bulletproof::Plus(PlusProof {
      A: read_point(r)?,
      wip: WipProof {
        A: read_point(r)?,
        B: read_point(r)?,
        r_answer: read_scalar(r)?,
        s_answer: read_scalar(r)?,
        delta_answer: read_scalar(r)?,
        L: read_vec(read_point, r)?.into_iter().collect(),
        R: read_vec(read_point, r)?.into_iter().collect(),
      },
    }))
  }
}
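A worked example of the clawback formula above, as a sketch only. It assumes `LOG_COMMITMENT_BITS` is 6 (64-bit ranges); the numbers simply follow the arithmetic in `calculate_bp_clawback` for a 4-output Bulletproof+.

```rust
// Illustrative arithmetic only (assumes LOG_COMMITMENT_BITS = 6):
//   LR_len      = 2 + 6 = 8
//   fields      = 6 (Bulletproof+)
//   base        = ((6 + 2 * (6 + 1)) * 32) / 2   = 320
//   size        = (6 + 2 * 8) * 32               = 704
//   bp_clawback = ((320 * 4) - 704) * 4 / 5      = 460
assert_eq!(Bulletproof::calculate_bp_clawback(true, 4), (460, 8));
```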
@@ -0,0 +1,303 @@
use std_shims::{vec, vec::Vec};

use zeroize::Zeroize;

use curve25519_dalek::{Scalar, EdwardsPoint};

use monero_generators::H;
use monero_primitives::{INV_EIGHT, keccak256_to_scalar};
use crate::{
  core::{multiexp_vartime, challenge_products},
  scalar_vector::ScalarVector,
  point_vector::PointVector,
  BulletproofsBatchVerifier,
};

/// An error from proving/verifying Inner-Product statements.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub(crate) enum IpError {
  IncorrectAmountOfGenerators,
  DifferingLrLengths,
}

/// The Bulletproofs Inner-Product statement.
///
/// This is for usage with Protocol 2 from the Bulletproofs paper.
#[derive(Clone, Debug)]
pub(crate) struct IpStatement {
  // Weights for h_bold
  h_bold_weights: ScalarVector,
  // u as the discrete logarithm of G
  u: Scalar,
}

/// The witness for the Bulletproofs Inner-Product statement.
#[derive(Clone, Debug)]
pub(crate) struct IpWitness {
  // a
  a: ScalarVector,
  // b
  b: ScalarVector,
}

impl IpWitness {
  /// Construct a new witness for an Inner-Product statement.
  ///
  /// This function returns None if the lengths of a, b are mismatched, not a power of two, or are
  /// empty.
  pub(crate) fn new(a: ScalarVector, b: ScalarVector) -> Option<Self> {
    if a.0.is_empty() || (a.len() != b.len()) {
      None?;
    }

    let mut power_of_2 = 1;
    while power_of_2 < a.len() {
      power_of_2 <<= 1;
    }
    if power_of_2 != a.len() {
      None?;
    }

    Some(Self { a, b })
  }
}

/// A proof for the Bulletproofs Inner-Product statement.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) struct IpProof {
  pub(crate) L: Vec<EdwardsPoint>,
  pub(crate) R: Vec<EdwardsPoint>,
  pub(crate) a: Scalar,
  pub(crate) b: Scalar,
}

impl IpStatement {
  /// Create a new Inner-Product statement which won't transcript P.
  ///
  /// This MUST only be called when P is deterministic to already transcripted elements.
  pub(crate) fn new_without_P_transcript(h_bold_weights: ScalarVector, u: Scalar) -> Self {
    Self { h_bold_weights, u }
  }

  // Transcript a round of the protocol
  fn transcript_L_R(transcript: Scalar, L: EdwardsPoint, R: EdwardsPoint) -> Scalar {
    let mut transcript = transcript.to_bytes().to_vec();
    transcript.extend(L.compress().to_bytes());
    transcript.extend(R.compress().to_bytes());
    keccak256_to_scalar(transcript)
  }

  /// Prove for this Inner-Product statement.
  ///
  /// Returns an error if this statement couldn't be proven for (such as if the witness isn't
  /// consistent).
  pub(crate) fn prove(
    self,
    mut transcript: Scalar,
    witness: IpWitness,
  ) -> Result<IpProof, IpError> {
    let generators = crate::original::GENERATORS();
    let g_bold_slice = &generators.G[.. witness.a.len()];
    let h_bold_slice = &generators.H[.. witness.a.len()];

    let (mut g_bold, mut h_bold, u, mut a, mut b) = {
      let IpStatement { h_bold_weights, u } = self;
      let u = H() * u;

      // Ensure we have the exact amount of weights
      if h_bold_weights.len() != g_bold_slice.len() {
        Err(IpError::IncorrectAmountOfGenerators)?;
      }
      // Acquire a local copy of the generators
      let g_bold = PointVector(g_bold_slice.to_vec());
      let h_bold = PointVector(h_bold_slice.to_vec()).mul_vec(&h_bold_weights);

      let IpWitness { a, b } = witness;

      (g_bold, h_bold, u, a, b)
    };

    let mut L_vec = vec![];
    let mut R_vec = vec![];

    // `else: (n > 1)` case, lines 18-35 of the Bulletproofs paper
    // This interprets `g_bold.len()` as `n`
    while g_bold.len() > 1 {
      // Split a, b, g_bold, h_bold as needed for lines 20-24
      let (a1, a2) = a.clone().split();
      let (b1, b2) = b.clone().split();

      let (g_bold1, g_bold2) = g_bold.split();
      let (h_bold1, h_bold2) = h_bold.split();

      let n_hat = g_bold1.len();

      // Sanity
      debug_assert_eq!(a1.len(), n_hat);
      debug_assert_eq!(a2.len(), n_hat);
      debug_assert_eq!(b1.len(), n_hat);
      debug_assert_eq!(b2.len(), n_hat);
      debug_assert_eq!(g_bold1.len(), n_hat);
      debug_assert_eq!(g_bold2.len(), n_hat);
      debug_assert_eq!(h_bold1.len(), n_hat);
      debug_assert_eq!(h_bold2.len(), n_hat);

      // cl, cr, lines 21-22
      let cl = a1.clone().inner_product(&b2);
      let cr = a2.clone().inner_product(&b1);

      let L = {
        let mut L_terms = Vec::with_capacity(1 + (2 * g_bold1.len()));
        for (a, g) in a1.0.iter().zip(g_bold2.0.iter()) {
          L_terms.push((*a, *g));
        }
        for (b, h) in b2.0.iter().zip(h_bold1.0.iter()) {
          L_terms.push((*b, *h));
        }
        L_terms.push((cl, u));
        // Uses vartime since this isn't a ZK proof
        multiexp_vartime(&L_terms)
      };
      L_vec.push(L * INV_EIGHT());

      let R = {
        let mut R_terms = Vec::with_capacity(1 + (2 * g_bold1.len()));
        for (a, g) in a2.0.iter().zip(g_bold1.0.iter()) {
          R_terms.push((*a, *g));
        }
        for (b, h) in b1.0.iter().zip(h_bold2.0.iter()) {
          R_terms.push((*b, *h));
        }
        R_terms.push((cr, u));
        multiexp_vartime(&R_terms)
      };
      R_vec.push(R * INV_EIGHT());

      // Now that we've calculated L, R, transcript them to receive x (26-27)
      transcript = Self::transcript_L_R(transcript, *L_vec.last().unwrap(), *R_vec.last().unwrap());
      let x = transcript;
      let x_inv = x.invert();

      // The prover and verifier now calculate the following (28-31)
      g_bold = PointVector(Vec::with_capacity(g_bold1.len()));
      for (a, b) in g_bold1.0.into_iter().zip(g_bold2.0.into_iter()) {
        g_bold.0.push(multiexp_vartime(&[(x_inv, a), (x, b)]));
      }
      h_bold = PointVector(Vec::with_capacity(h_bold1.len()));
      for (a, b) in h_bold1.0.into_iter().zip(h_bold2.0.into_iter()) {
        h_bold.0.push(multiexp_vartime(&[(x, a), (x_inv, b)]));
      }

      // 32-34
      a = (a1 * x) + &(a2 * x_inv);
      b = (b1 * x_inv) + &(b2 * x);
    }

    // `if n = 1` case from line 14-17

    // Sanity
    debug_assert_eq!(g_bold.len(), 1);
    debug_assert_eq!(h_bold.len(), 1);
    debug_assert_eq!(a.len(), 1);
    debug_assert_eq!(b.len(), 1);

    // We simply send a/b
    Ok(IpProof { L: L_vec, R: R_vec, a: a[0], b: b[0] })
  }

  /// Queue an Inner-Product proof for batch verification.
  ///
  /// This will return Err if there is an error. This will return Ok if the proof was successfully
  /// queued for batch verification. The caller is required to verify the batch in order to ensure
  /// the proof is actually correct.
  pub(crate) fn verify(
    self,
    verifier: &mut BulletproofsBatchVerifier,
    ip_rows: usize,
    mut transcript: Scalar,
    verifier_weight: Scalar,
    proof: IpProof,
  ) -> Result<(), IpError> {
    let generators = crate::original::GENERATORS();
    let g_bold_slice = &generators.G[.. ip_rows];
    let h_bold_slice = &generators.H[.. ip_rows];

    let IpStatement { h_bold_weights, u } = self;

    // Verify the L/R lengths
    {
      // Calculate the discrete log w.r.t. 2 for the amount of generators present
      let mut lr_len = 0;
      while (1 << lr_len) < g_bold_slice.len() {
        lr_len += 1;
      }

      // This proof has fewer/more terms than the passed-in generators are for
      if proof.L.len() != lr_len {
        Err(IpError::IncorrectAmountOfGenerators)?;
      }
      if proof.L.len() != proof.R.len() {
        Err(IpError::DifferingLrLengths)?;
      }
    }

    // Again, we start with the `else: (n > 1)` case

    // We need x, x_inv per lines 25-27 for lines 28-31
    let mut xs = Vec::with_capacity(proof.L.len());
    for (L, R) in proof.L.iter().zip(proof.R.iter()) {
      transcript = Self::transcript_L_R(transcript, *L, *R);
      xs.push(transcript);
    }

    // We calculate their inverse in batch
    let mut x_invs = xs.clone();
    Scalar::batch_invert(&mut x_invs);

    // Now, with x and x_inv, we need to calculate g_bold', h_bold', P'
    //
    // For the sake of performance, we solely want to calculate all of these in terms of scalings
    // for g_bold, h_bold, P, and don't want to actually perform intermediary scalings of the
    // points
    //
    // L and R are easy, as it's simply x**2, x**-2
    //
    // For the series of g_bold, h_bold, we use the `challenge_products` function
    // For how that works, please see its own documentation
    let product_cache = {
      let mut challenges = Vec::with_capacity(proof.L.len());

      let x_iter = xs.into_iter().zip(x_invs);
      let lr_iter = proof.L.into_iter().zip(proof.R);
      for ((x, x_inv), (L, R)) in x_iter.zip(lr_iter) {
        challenges.push((x, x_inv));
        verifier.0.other.push((verifier_weight * (x * x), L.mul_by_cofactor()));
        verifier.0.other.push((verifier_weight * (x_inv * x_inv), R.mul_by_cofactor()));
      }

      challenge_products(&challenges)
    };

    // And now for the `if n = 1` case
    let c = proof.a * proof.b;

    // The multiexp of these terms equates to the final permutation of P
    // We now add terms for a * g_bold' + b * h_bold' + c * u, with the scalars negative such
    // that the terms sum to 0 for an honest prover

    // The g_bold * a term case from line 16
    #[allow(clippy::needless_range_loop)]
    for i in 0 .. g_bold_slice.len() {
      verifier.0.g_bold[i] -= verifier_weight * product_cache[i] * proof.a;
    }
    // The h_bold * b term case from line 16
    for i in 0 .. h_bold_slice.len() {
      verifier.0.h_bold[i] -=
        verifier_weight * product_cache[product_cache.len() - 1 - i] * proof.b * h_bold_weights[i];
    }
    // The c * u term case from line 16
    verifier.0.h -= verifier_weight * c * u;

    Ok(())
  }
}
networks/monero/ringct/bulletproofs/src/original/mod.rs (Normal file, 339 lines)
@@ -0,0 +1,339 @@
use std_shims::{sync::OnceLock, vec::Vec};

use rand_core::{RngCore, CryptoRng};

use zeroize::Zeroize;

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, Scalar, EdwardsPoint};

use monero_generators::{H, Generators, MAX_COMMITMENTS, COMMITMENT_BITS};
use monero_primitives::{Commitment, INV_EIGHT, keccak256_to_scalar};
use crate::{core::multiexp, scalar_vector::ScalarVector, BulletproofsBatchVerifier};

pub(crate) mod inner_product;
use inner_product::*;
pub(crate) use inner_product::IpProof;

include!(concat!(env!("OUT_DIR"), "/generators.rs"));

#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement<'a> {
  commitments: &'a [EdwardsPoint],
}

#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeWitness {
  commitments: Vec<Commitment>,
}

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
  pub(crate) A: EdwardsPoint,
  pub(crate) S: EdwardsPoint,
  pub(crate) T1: EdwardsPoint,
  pub(crate) T2: EdwardsPoint,
  pub(crate) tau_x: Scalar,
  pub(crate) mu: Scalar,
  pub(crate) t_hat: Scalar,
  pub(crate) ip: IpProof,
}

impl<'a> AggregateRangeStatement<'a> {
  pub(crate) fn new(commitments: &'a [EdwardsPoint]) -> Option<Self> {
    if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
      None?;
    }
    Some(Self { commitments })
  }
}

impl AggregateRangeWitness {
  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
    if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
      None?;
    }
    Some(Self { commitments })
  }
}

impl<'a> AggregateRangeStatement<'a> {
  fn initial_transcript(&self) -> (Scalar, Vec<EdwardsPoint>) {
    let V = self.commitments.iter().map(|c| c * INV_EIGHT()).collect::<Vec<_>>();
    (keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
  }

  fn transcript_A_S(transcript: Scalar, A: EdwardsPoint, S: EdwardsPoint) -> (Scalar, Scalar) {
    let mut buf = Vec::with_capacity(96);
    buf.extend(transcript.to_bytes());
    buf.extend(A.compress().to_bytes());
    buf.extend(S.compress().to_bytes());
    let y = keccak256_to_scalar(buf);
    let z = keccak256_to_scalar(y.to_bytes());
    (y, z)
  }

  fn transcript_T12(transcript: Scalar, T1: EdwardsPoint, T2: EdwardsPoint) -> Scalar {
    let mut buf = Vec::with_capacity(128);
    buf.extend(transcript.to_bytes());
    buf.extend(transcript.to_bytes());
    buf.extend(T1.compress().to_bytes());
    buf.extend(T2.compress().to_bytes());
    keccak256_to_scalar(buf)
  }

  fn transcript_tau_x_mu_t_hat(
    transcript: Scalar,
    tau_x: Scalar,
    mu: Scalar,
    t_hat: Scalar,
  ) -> Scalar {
    let mut buf = Vec::with_capacity(128);
    buf.extend(transcript.to_bytes());
    buf.extend(transcript.to_bytes());
    buf.extend(tau_x.to_bytes());
    buf.extend(mu.to_bytes());
    buf.extend(t_hat.to_bytes());
    keccak256_to_scalar(buf)
  }

  #[allow(clippy::needless_pass_by_value)]
  pub(crate) fn prove(
    self,
    rng: &mut (impl RngCore + CryptoRng),
    witness: AggregateRangeWitness,
  ) -> Option<AggregateRangeProof> {
    if self.commitments != witness.commitments.iter().map(Commitment::calculate).collect::<Vec<_>>()
    {
      None?
    };

    let generators = GENERATORS();

    let (mut transcript, _) = self.initial_transcript();

    // Find out the padded amount of commitments
    let mut padded_pow_of_2 = 1;
    while padded_pow_of_2 < witness.commitments.len() {
      padded_pow_of_2 <<= 1;
    }

    let mut aL = ScalarVector::new(padded_pow_of_2 * COMMITMENT_BITS);
    for (i, commitment) in witness.commitments.iter().enumerate() {
      let mut amount = commitment.amount;
      for j in 0 .. COMMITMENT_BITS {
        aL[(i * COMMITMENT_BITS) + j] = Scalar::from(amount & 1);
        amount >>= 1;
      }
    }
    let aR = aL.clone() - Scalar::ONE;

    let alpha = Scalar::random(&mut *rng);

    let A = {
      let mut terms = Vec::with_capacity(1 + (2 * aL.len()));
      terms.push((alpha, ED25519_BASEPOINT_POINT));
      for (aL, G) in aL.0.iter().zip(&generators.G) {
        terms.push((*aL, *G));
      }
      for (aR, H) in aR.0.iter().zip(&generators.H) {
        terms.push((*aR, *H));
      }
      let res = multiexp(&terms) * INV_EIGHT();
      terms.zeroize();
      res
    };

    let mut sL = ScalarVector::new(padded_pow_of_2 * COMMITMENT_BITS);
    let mut sR = ScalarVector::new(padded_pow_of_2 * COMMITMENT_BITS);
    for i in 0 .. (padded_pow_of_2 * COMMITMENT_BITS) {
      sL[i] = Scalar::random(&mut *rng);
      sR[i] = Scalar::random(&mut *rng);
    }
    let rho = Scalar::random(&mut *rng);

    let S = {
      let mut terms = Vec::with_capacity(1 + (2 * sL.len()));
      terms.push((rho, ED25519_BASEPOINT_POINT));
      for (sL, G) in sL.0.iter().zip(&generators.G) {
        terms.push((*sL, *G));
      }
      for (sR, H) in sR.0.iter().zip(&generators.H) {
        terms.push((*sR, *H));
      }
      let res = multiexp(&terms) * INV_EIGHT();
      terms.zeroize();
      res
    };

    let (y, z) = Self::transcript_A_S(transcript, A, S);
    transcript = z;
    let z = ScalarVector::powers(z, 3 + padded_pow_of_2);

    let twos = ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS);

    let l = [aL - z[1], sL];
    let y_pow_n = ScalarVector::powers(y, aR.len());
    let mut r = [((aR + z[1]) * &y_pow_n), sR * &y_pow_n];
    {
      for j in 0 .. padded_pow_of_2 {
        for i in 0 .. COMMITMENT_BITS {
          r[0].0[(j * COMMITMENT_BITS) + i] += z[2 + j] * twos[i];
        }
      }
    }
    let t1 = (l[0].clone().inner_product(&r[1])) + (r[0].clone().inner_product(&l[1]));
    let t2 = l[1].clone().inner_product(&r[1]);

    let tau_1 = Scalar::random(&mut *rng);
    let T1 = {
      let mut T1_terms = [(t1, H()), (tau_1, ED25519_BASEPOINT_POINT)];
      for term in &mut T1_terms {
        term.0 *= INV_EIGHT();
      }
      let T1 = multiexp(&T1_terms);
      T1_terms.zeroize();
      T1
    };
    let tau_2 = Scalar::random(&mut *rng);
    let T2 = {
      let mut T2_terms = [(t2, H()), (tau_2, ED25519_BASEPOINT_POINT)];
      for term in &mut T2_terms {
        term.0 *= INV_EIGHT();
      }
      let T2 = multiexp(&T2_terms);
      T2_terms.zeroize();
      T2
    };

    transcript = Self::transcript_T12(transcript, T1, T2);
    let x = transcript;

    let [l0, l1] = l;
    let l = l0 + &(l1 * x);
    let [r0, r1] = r;
    let r = r0 + &(r1 * x);
    let t_hat = l.clone().inner_product(&r);
    let mut tau_x = ((tau_2 * x) + tau_1) * x;
    {
      for (i, commitment) in witness.commitments.iter().enumerate() {
        tau_x += z[2 + i] * commitment.mask;
      }
    }
    let mu = alpha + (rho * x);

    let y_inv_pow_n = ScalarVector::powers(y.invert(), l.len());

    transcript = Self::transcript_tau_x_mu_t_hat(transcript, tau_x, mu, t_hat);
    let x_ip = transcript;

    let ip = IpStatement::new_without_P_transcript(y_inv_pow_n, x_ip)
      .prove(transcript, IpWitness::new(l, r).unwrap())
      .unwrap();

    let res = AggregateRangeProof { A, S, T1, T2, tau_x, mu, t_hat, ip };
    #[cfg(debug_assertions)]
    {
      let mut verifier = BulletproofsBatchVerifier::default();
      debug_assert!(self.verify(rng, &mut verifier, res.clone()));
      debug_assert!(verifier.verify());
    }
    Some(res)
  }

  #[must_use]
  pub(crate) fn verify(
    self,
    rng: &mut (impl RngCore + CryptoRng),
    verifier: &mut BulletproofsBatchVerifier,
    mut proof: AggregateRangeProof,
  ) -> bool {
    let mut padded_pow_of_2 = 1;
    while padded_pow_of_2 < self.commitments.len() {
      padded_pow_of_2 <<= 1;
    }
    let ip_rows = padded_pow_of_2 * COMMITMENT_BITS;

    while verifier.0.g_bold.len() < ip_rows {
      verifier.0.g_bold.push(Scalar::ZERO);
      verifier.0.h_bold.push(Scalar::ZERO);
    }

    let (mut transcript, mut commitments) = self.initial_transcript();
    for commitment in &mut commitments {
      *commitment = commitment.mul_by_cofactor();
    }

    let (y, z) = Self::transcript_A_S(transcript, proof.A, proof.S);
    transcript = z;
    let z = ScalarVector::powers(z, 3 + padded_pow_of_2);
    transcript = Self::transcript_T12(transcript, proof.T1, proof.T2);
    let x = transcript;
    transcript = Self::transcript_tau_x_mu_t_hat(transcript, proof.tau_x, proof.mu, proof.t_hat);
    let x_ip = transcript;

    proof.A = proof.A.mul_by_cofactor();
    proof.S = proof.S.mul_by_cofactor();
    proof.T1 = proof.T1.mul_by_cofactor();
    proof.T2 = proof.T2.mul_by_cofactor();

    let y_pow_n = ScalarVector::powers(y, ip_rows);
    let y_inv_pow_n = ScalarVector::powers(y.invert(), ip_rows);

    let twos = ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS);

    // 65
    {
      let weight = Scalar::random(&mut *rng);
      verifier.0.h += weight * proof.t_hat;
      verifier.0.g += weight * proof.tau_x;

      // Now that we've accumulated the lhs, negate the weight and accumulate the rhs
      // These will now sum to 0 if equal
      let weight = -weight;

      verifier.0.h += weight * (z[1] - (z[2])) * y_pow_n.sum();

      for (i, commitment) in commitments.iter().enumerate() {
        verifier.0.other.push((weight * z[2 + i], *commitment));
      }

      for i in 0 .. padded_pow_of_2 {
        verifier.0.h -= weight * z[3 + i] * twos.clone().sum();
      }
      verifier.0.other.push((weight * x, proof.T1));
      verifier.0.other.push((weight * (x * x), proof.T2));
    }

    let ip_weight = Scalar::random(&mut *rng);

    // 66
    verifier.0.other.push((ip_weight, proof.A));
    verifier.0.other.push((ip_weight * x, proof.S));
    // We can replace these with a g_sum, h_sum scalar in the batch verifier
    // It'd trade `2 * ip_rows` scalar additions (per proof) for one scalar addition and an
    // additional term in the MSM
    let ip_z = ip_weight * z[1];
    for i in 0 .. ip_rows {
      verifier.0.h_bold[i] += ip_z;
    }
    let neg_ip_z = -ip_z;
    for i in 0 .. ip_rows {
      verifier.0.g_bold[i] += neg_ip_z;
    }
    {
      for j in 0 .. padded_pow_of_2 {
        for i in 0 .. COMMITMENT_BITS {
          let full_i = (j * COMMITMENT_BITS) + i;
          verifier.0.h_bold[full_i] += ip_weight * y_inv_pow_n[full_i] * z[2 + j] * twos[i];
        }
      }
    }
    verifier.0.h += ip_weight * x_ip * proof.t_hat;

    // 67, 68
    verifier.0.g += ip_weight * -proof.mu;
    let res = IpStatement::new_without_P_transcript(y_inv_pow_n, x_ip)
      .verify(verifier, ip_rows, transcript, ip_weight, proof.ip);
    res.is_ok()
  }
}
@@ -0,0 +1,255 @@
|
||||
use std_shims::{vec, vec::Vec};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
|
||||
|
||||
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};
|
||||
|
||||
use crate::{
|
||||
batch_verifier::BulletproofsPlusBatchVerifier,
|
||||
core::{MAX_COMMITMENTS, COMMITMENT_BITS, multiexp, multiexp_vartime},
|
||||
plus::{
|
||||
ScalarVector, PointVector, GeneratorsList, BpPlusGenerators,
|
||||
transcript::*,
|
||||
weighted_inner_product::{WipStatement, WipWitness, WipProof},
|
||||
padded_pow_of_2, u64_decompose,
|
||||
},
|
||||
};
|
||||
|
||||
// Figure 3 of the Bulletproofs+ Paper
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct AggregateRangeStatement<'a> {
|
||||
generators: BpPlusGenerators,
|
||||
V: &'a [EdwardsPoint],
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
|
||||
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
|
||||
|
||||
impl AggregateRangeWitness {
|
||||
pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
|
||||
if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(AggregateRangeWitness(commitments))
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal structure representing a Bulletproof+, as defined by Monero..
|
||||
#[doc(hidden)]
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct AggregateRangeProof {
|
||||
pub(crate) A: EdwardsPoint,
|
||||
pub(crate) wip: WipProof,
|
||||
}
|
||||
|
||||
struct AHatComputation {
|
||||
y: Scalar,
|
||||
d_descending_y_plus_z: ScalarVector,
|
||||
y_mn_plus_one: Scalar,
|
||||
z: Scalar,
|
||||
z_pow: ScalarVector,
|
||||
A_hat: EdwardsPoint,
|
||||
}
|
||||
|
||||
impl<'a> AggregateRangeStatement<'a> {
|
||||
pub(crate) fn new(V: &'a [EdwardsPoint]) -> Option<Self> {
|
||||
if V.is_empty() || (V.len() > MAX_COMMITMENTS) {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(Self { generators: BpPlusGenerators::new(), V })
|
||||
}
|
||||
|
||||
fn transcript_A(transcript: &mut Scalar, A: EdwardsPoint) -> (Scalar, Scalar) {
|
||||
let y = keccak256_to_scalar(
|
||||
[transcript.to_bytes().as_ref(), A.compress().to_bytes().as_ref()].concat(),
|
||||
);
|
||||
let z = keccak256_to_scalar(y.to_bytes().as_ref());
|
||||
*transcript = z;
|
||||
(y, z)
|
||||
}
|
||||
|
||||
fn d_j(j: usize, m: usize) -> ScalarVector {
|
||||
let mut d_j = Vec::with_capacity(m * COMMITMENT_BITS);
|
||||
for _ in 0 .. (j - 1) * COMMITMENT_BITS {
|
||||
d_j.push(Scalar::ZERO);
|
||||
}
|
||||
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS).0);
|
||||
for _ in 0 .. (m - j) * COMMITMENT_BITS {
|
||||
d_j.push(Scalar::ZERO);
|
||||
}
|
||||
ScalarVector(d_j)
|
||||
}
|
||||
|
||||
fn compute_A_hat(
|
||||
mut V: PointVector,
|
||||
generators: &BpPlusGenerators,
|
||||
transcript: &mut Scalar,
|
||||
mut A: EdwardsPoint,
|
||||
) -> AHatComputation {
|
||||
let (y, z) = Self::transcript_A(transcript, A);
|
||||
A = A.mul_by_cofactor();
|
||||
|
||||
while V.len() < padded_pow_of_2(V.len()) {
|
||||
V.0.push(EdwardsPoint::identity());
|
||||
}
|
||||
let mn = V.len() * COMMITMENT_BITS;
|
||||
|
||||
// 2, 4, 6, 8... powers of z, of length equivalent to the amount of commitments
|
||||
let mut z_pow = Vec::with_capacity(V.len());
|
||||
// z**2
|
||||
z_pow.push(z * z);
|
||||
|
||||
let mut d = ScalarVector::new(mn);
|
||||
for j in 1 ..= V.len() {
|
||||
z_pow.push(*z_pow.last().unwrap() * z_pow[0]);
|
||||
d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
|
||||
}
|
||||
|
||||
let mut ascending_y = ScalarVector(vec![y]);
|
||||
for i in 1 .. d.len() {
|
||||
ascending_y.0.push(ascending_y[i - 1] * y);
|
||||
}
|
||||
let y_pows = ascending_y.clone().sum();
|
||||
|
||||
let mut descending_y = ascending_y.clone();
|
||||
descending_y.0.reverse();
|
||||
|
||||
let d_descending_y = d.clone() * &descending_y;
|
||||
let d_descending_y_plus_z = d_descending_y + z;
|
||||
|
||||
let y_mn_plus_one = descending_y[0] * y;
|
||||
|
||||
let mut commitment_accum = EdwardsPoint::identity();
|
||||
for (j, commitment) in V.0.iter().enumerate() {
|
||||
commitment_accum += *commitment * z_pow[j];
|
||||
}
|
||||
|
||||
let neg_z = -z;
|
||||
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
|
||||
for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
|
||||
A_terms.push((neg_z, generators.generator(GeneratorsList::GBold, i)));
|
||||
A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold, i)));
|
||||
}
|
||||
A_terms.push((y_mn_plus_one, commitment_accum));
|
||||
A_terms.push((
|
||||
((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * (z * z))),
|
||||
BpPlusGenerators::g(),
|
||||
));
|
||||
|
||||
AHatComputation {
|
||||
y,
|
||||
d_descending_y_plus_z,
|
||||
y_mn_plus_one,
|
||||
z,
|
||||
z_pow: ScalarVector(z_pow),
|
||||
A_hat: A + multiexp_vartime(&A_terms),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn prove<R: RngCore + CryptoRng>(
|
||||
self,
|
||||
rng: &mut R,
|
||||
witness: &AggregateRangeWitness,
|
||||
) -> Option<AggregateRangeProof> {
|
||||
// Check for consistency with the witness
|
||||
if self.V.len() != witness.0.len() {
|
||||
return None;
|
||||
}
|
||||
for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
|
||||
if witness.calculate() != *commitment {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
let Self { generators, V } = self;
|
||||
// Monero expects all of these points to be torsion-free
|
||||
// Generally, for Bulletproofs, it sends points * INV_EIGHT and then performs a torsion clear
|
||||
// by multiplying by 8
|
||||
// This also restores the original value due to the preprocessing
|
||||
// Commitments aren't transmitted * INV_EIGHT though, so this multiplies by INV_EIGHT to enable
|
||||
// clearing its cofactor without mutating the value
|
||||
// For some reason, these values are transcripted * INV_EIGHT, not as transmitted
|
||||
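// Concretely: INV_EIGHT is the inverse of 8 modulo the prime-order subgroup's order, and
// mul_by_cofactor multiplies by 8, so for any torsion-free point P,
// (P * INV_EIGHT()).mul_by_cofactor() == P. Any torsion component, whose order divides 8, is
// eliminated by that multiplication by 8, which is the "torsion clear" referenced above.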
let V = V.iter().map(|V| V * INV_EIGHT()).collect::<Vec<_>>();
|
||||
let mut transcript = initial_transcript(V.iter());
|
||||
let mut V = V.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
|
||||
|
||||
// Pad V
|
||||
while V.len() < padded_pow_of_2(V.len()) {
|
||||
V.push(EdwardsPoint::identity());
|
||||
}
|
||||
|
||||
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
|
||||
|
||||
let mut d_js = Vec::with_capacity(V.len());
|
||||
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * COMMITMENT_BITS));
|
||||
for j in 1 ..= V.len() {
|
||||
d_js.push(Self::d_j(j, V.len()));
|
||||
#[allow(clippy::map_unwrap_or)]
|
||||
a_l.0.append(
|
||||
&mut u64_decompose(
|
||||
*witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
|
||||
)
|
||||
.0,
|
||||
);
|
||||
}
|
||||
|
||||
let a_r = a_l.clone() - Scalar::ONE;
|
||||
|
||||
let alpha = Scalar::random(&mut *rng);
|
||||
|
||||
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 1);
|
||||
for (i, a_l) in a_l.0.iter().enumerate() {
|
||||
A_terms.push((*a_l, generators.generator(GeneratorsList::GBold, i)));
|
||||
}
|
||||
for (i, a_r) in a_r.0.iter().enumerate() {
|
||||
A_terms.push((*a_r, generators.generator(GeneratorsList::HBold, i)));
|
||||
}
|
||||
A_terms.push((alpha, BpPlusGenerators::h()));
|
||||
let mut A = multiexp(&A_terms);
|
||||
A_terms.zeroize();
|
||||
|
||||
// Multiply by INV_EIGHT per earlier commentary
|
||||
A *= INV_EIGHT();
|
||||
|
||||
let AHatComputation { y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat } =
|
||||
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);
|
||||
|
||||
let a_l = a_l - z;
|
||||
let a_r = a_r + &d_descending_y_plus_z;
|
||||
let mut alpha = alpha;
|
||||
for j in 1 ..= witness.0.len() {
|
||||
alpha += z_pow[j - 1] * witness.0[j - 1].mask * y_mn_plus_one;
|
||||
}
|
||||
|
||||
Some(AggregateRangeProof {
|
||||
A,
|
||||
wip: WipStatement::new(generators, A_hat, y)
|
||||
.prove(rng, transcript, &Zeroizing::new(WipWitness::new(a_l, a_r, alpha).unwrap()))
|
||||
.unwrap(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn verify<R: RngCore + CryptoRng>(
|
||||
self,
|
||||
rng: &mut R,
|
||||
verifier: &mut BulletproofsPlusBatchVerifier,
|
||||
proof: AggregateRangeProof,
|
||||
) -> bool {
|
||||
let Self { generators, V } = self;
|
||||
|
||||
let V = V.iter().map(|V| V * INV_EIGHT()).collect::<Vec<_>>();
|
||||
let mut transcript = initial_transcript(V.iter());
|
||||
let V = V.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
|
||||
|
||||
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
|
||||
|
||||
let AHatComputation { y, A_hat, .. } =
|
||||
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, proof.A);
|
||||
WipStatement::new(generators, A_hat, y).verify(rng, verifier, transcript, proof.wip)
|
||||
}
|
||||
}
|
||||
81
networks/monero/ringct/bulletproofs/src/plus/mod.rs
Normal file
@@ -0,0 +1,81 @@
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
use std_shims::sync::OnceLock;
|
||||
|
||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use monero_generators::{H, Generators};
|
||||
|
||||
pub(crate) use crate::{scalar_vector::ScalarVector, point_vector::PointVector};
|
||||
|
||||
pub(crate) mod transcript;
|
||||
pub(crate) mod weighted_inner_product;
|
||||
pub(crate) use weighted_inner_product::*;
|
||||
pub(crate) mod aggregate_range_proof;
|
||||
pub(crate) use aggregate_range_proof::*;
|
||||
|
||||
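// Returns the smallest power of two greater than or equal to `i` (with a minimum of 1),
// e.g. padded_pow_of_2(5) == 8 and padded_pow_of_2(8) == 8.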
pub(crate) fn padded_pow_of_2(i: usize) -> usize {
|
||||
let mut next_pow_of_2 = 1;
|
||||
while next_pow_of_2 < i {
|
||||
next_pow_of_2 <<= 1;
|
||||
}
|
||||
next_pow_of_2
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
|
||||
pub(crate) enum GeneratorsList {
|
||||
GBold,
|
||||
HBold,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct BpPlusGenerators {
|
||||
g_bold: &'static [EdwardsPoint],
|
||||
h_bold: &'static [EdwardsPoint],
|
||||
}
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/generators_plus.rs"));
|
||||
|
||||
impl BpPlusGenerators {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub(crate) fn new() -> Self {
|
||||
let gens = GENERATORS();
|
||||
BpPlusGenerators { g_bold: &gens.G, h_bold: &gens.H }
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.g_bold.len()
|
||||
}
|
||||
|
||||
pub(crate) fn g() -> EdwardsPoint {
|
||||
H()
|
||||
}
|
||||
|
||||
pub(crate) fn h() -> EdwardsPoint {
|
||||
ED25519_BASEPOINT_POINT
|
||||
}
|
||||
|
||||
pub(crate) fn generator(&self, list: GeneratorsList, i: usize) -> EdwardsPoint {
|
||||
match list {
|
||||
GeneratorsList::GBold => self.g_bold[i],
|
||||
GeneratorsList::HBold => self.h_bold[i],
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn reduce(&self, generators: usize) -> Self {
|
||||
// Round up to the next power of 2
|
||||
let generators = padded_pow_of_2(generators);
|
||||
assert!(generators <= self.g_bold.len());
|
||||
|
||||
BpPlusGenerators { g_bold: &self.g_bold[.. generators], h_bold: &self.h_bold[.. generators] }
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the little-endian decomposition.
|
||||
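// e.g. u64_decompose(0b1101) yields the scalars [1, 0, 1, 1, 0, 0, ..., 0], least significant
// bit first.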
fn u64_decompose(value: u64) -> ScalarVector {
|
||||
let mut bits = ScalarVector::new(64);
|
||||
for bit in 0 .. 64 {
|
||||
bits[bit] = Scalar::from((value >> bit) & 1);
|
||||
}
|
||||
bits
|
||||
}
|
||||
20
networks/monero/ringct/bulletproofs/src/plus/transcript.rs
Normal file
@@ -0,0 +1,20 @@
use std_shims::{sync::OnceLock, vec::Vec};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use monero_generators::hash_to_point;
use monero_primitives::{keccak256, keccak256_to_scalar};

// Monero starts BP+ transcripts with the following constant.
static TRANSCRIPT_CELL: OnceLock<[u8; 32]> = OnceLock::new();
pub(crate) fn TRANSCRIPT() -> [u8; 32] {
  // Why this uses a hash_to_point is completely unknown.
  *TRANSCRIPT_CELL
    .get_or_init(|| hash_to_point(keccak256(b"bulletproof_plus_transcript")).compress().to_bytes())
}

pub(crate) fn initial_transcript(commitments: core::slice::Iter<'_, EdwardsPoint>) -> Scalar {
  let commitments_hash =
    keccak256_to_scalar(commitments.flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>());
  keccak256_to_scalar([TRANSCRIPT().as_ref(), &commitments_hash.to_bytes()].concat())
}
@@ -0,0 +1,405 @@
|
||||
use std_shims::{vec, vec::Vec};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use monero_primitives::{INV_EIGHT, keccak256_to_scalar};
|
||||
use crate::{
|
||||
core::{multiexp, multiexp_vartime, challenge_products},
|
||||
batch_verifier::BulletproofsPlusBatchVerifier,
|
||||
plus::{ScalarVector, PointVector, GeneratorsList, BpPlusGenerators, padded_pow_of_2},
|
||||
};
|
||||
|
||||
// Figure 1 of the Bulletproofs+ paper
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct WipStatement {
|
||||
generators: BpPlusGenerators,
|
||||
P: EdwardsPoint,
|
||||
y: ScalarVector,
|
||||
}
|
||||
|
||||
impl Zeroize for WipStatement {
|
||||
fn zeroize(&mut self) {
|
||||
self.P.zeroize();
|
||||
self.y.zeroize();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
|
||||
pub(crate) struct WipWitness {
|
||||
a: ScalarVector,
|
||||
b: ScalarVector,
|
||||
alpha: Scalar,
|
||||
}
|
||||
|
||||
impl WipWitness {
|
||||
pub(crate) fn new(mut a: ScalarVector, mut b: ScalarVector, alpha: Scalar) -> Option<Self> {
|
||||
if a.0.is_empty() || (a.len() != b.len()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Pad to the nearest power of 2
|
||||
let missing = padded_pow_of_2(a.len()) - a.len();
|
||||
a.0.reserve(missing);
|
||||
b.0.reserve(missing);
|
||||
for _ in 0 .. missing {
|
||||
a.0.push(Scalar::ZERO);
|
||||
b.0.push(Scalar::ZERO);
|
||||
}
|
||||
|
||||
Some(Self { a, b, alpha })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub(crate) struct WipProof {
|
||||
pub(crate) L: Vec<EdwardsPoint>,
|
||||
pub(crate) R: Vec<EdwardsPoint>,
|
||||
pub(crate) A: EdwardsPoint,
|
||||
pub(crate) B: EdwardsPoint,
|
||||
pub(crate) r_answer: Scalar,
|
||||
pub(crate) s_answer: Scalar,
|
||||
pub(crate) delta_answer: Scalar,
|
||||
}
|
||||
|
||||
impl WipStatement {
|
||||
pub(crate) fn new(generators: BpPlusGenerators, P: EdwardsPoint, y: Scalar) -> Self {
|
||||
debug_assert_eq!(generators.len(), padded_pow_of_2(generators.len()));
|
||||
|
||||
// y ** n
|
||||
let mut y_vec = ScalarVector::new(generators.len());
|
||||
y_vec[0] = y;
|
||||
for i in 1 .. y_vec.len() {
|
||||
y_vec[i] = y_vec[i - 1] * y;
|
||||
}
|
||||
|
||||
Self { generators, P, y: y_vec }
|
||||
}
|
||||
|
||||
fn transcript_L_R(transcript: &mut Scalar, L: EdwardsPoint, R: EdwardsPoint) -> Scalar {
|
||||
let e = keccak256_to_scalar(
|
||||
[
|
||||
transcript.to_bytes().as_ref(),
|
||||
L.compress().to_bytes().as_ref(),
|
||||
R.compress().to_bytes().as_ref(),
|
||||
]
|
||||
.concat(),
|
||||
);
|
||||
*transcript = e;
|
||||
e
|
||||
}
|
||||
|
||||
fn transcript_A_B(transcript: &mut Scalar, A: EdwardsPoint, B: EdwardsPoint) -> Scalar {
|
||||
let e = keccak256_to_scalar(
|
||||
[
|
||||
transcript.to_bytes().as_ref(),
|
||||
A.compress().to_bytes().as_ref(),
|
||||
B.compress().to_bytes().as_ref(),
|
||||
]
|
||||
.concat(),
|
||||
);
|
||||
*transcript = e;
|
||||
e
|
||||
}
|
||||
|
||||
// Prover's variant of the shared code block to calculate G/H/P when n > 1
|
||||
// Returns each permutation of G/H since the prover needs to operate on each permutation
|
||||
// P is dropped as it's unused in the prover's path
|
||||
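// The folding below halves the generator vectors per round:
//   g_bold'[i] = (1/e) * g_bold1[i] + (e * y^-n_hat) * g_bold2[i]
//   h_bold'[i] = e * h_bold1[i] + (1/e) * h_bold2[i]
// where e is the round's challenge derived from L and R.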
#[allow(clippy::too_many_arguments)]
|
||||
fn next_G_H(
|
||||
transcript: &mut Scalar,
|
||||
mut g_bold1: PointVector,
|
||||
mut g_bold2: PointVector,
|
||||
mut h_bold1: PointVector,
|
||||
mut h_bold2: PointVector,
|
||||
L: EdwardsPoint,
|
||||
R: EdwardsPoint,
|
||||
y_inv_n_hat: Scalar,
|
||||
) -> (Scalar, Scalar, Scalar, Scalar, PointVector, PointVector) {
|
||||
debug_assert_eq!(g_bold1.len(), g_bold2.len());
|
||||
debug_assert_eq!(h_bold1.len(), h_bold2.len());
|
||||
debug_assert_eq!(g_bold1.len(), h_bold1.len());
|
||||
|
||||
let e = Self::transcript_L_R(transcript, L, R);
|
||||
let inv_e = e.invert();
|
||||
|
||||
// This vartime is safe as all of these arguments are public
|
||||
let mut new_g_bold = Vec::with_capacity(g_bold1.len());
|
||||
let e_y_inv = e * y_inv_n_hat;
|
||||
for g_bold in g_bold1.0.drain(..).zip(g_bold2.0.drain(..)) {
|
||||
new_g_bold.push(multiexp_vartime(&[(inv_e, g_bold.0), (e_y_inv, g_bold.1)]));
|
||||
}
|
||||
|
||||
let mut new_h_bold = Vec::with_capacity(h_bold1.len());
|
||||
for h_bold in h_bold1.0.drain(..).zip(h_bold2.0.drain(..)) {
|
||||
new_h_bold.push(multiexp_vartime(&[(e, h_bold.0), (inv_e, h_bold.1)]));
|
||||
}
|
||||
|
||||
let e_square = e * e;
|
||||
let inv_e_square = inv_e * inv_e;
|
||||
|
||||
(e, inv_e, e_square, inv_e_square, PointVector(new_g_bold), PointVector(new_h_bold))
|
||||
}
|
||||
|
||||
pub(crate) fn prove<R: RngCore + CryptoRng>(
|
||||
self,
|
||||
rng: &mut R,
|
||||
mut transcript: Scalar,
|
||||
witness: &WipWitness,
|
||||
) -> Option<WipProof> {
|
||||
let WipStatement { generators, P, mut y } = self;
|
||||
#[cfg(not(debug_assertions))]
|
||||
let _ = P;
|
||||
|
||||
if generators.len() != witness.a.len() {
|
||||
return None;
|
||||
}
|
||||
let (g, h) = (BpPlusGenerators::g(), BpPlusGenerators::h());
|
||||
let mut g_bold = vec![];
|
||||
let mut h_bold = vec![];
|
||||
for i in 0 .. generators.len() {
|
||||
g_bold.push(generators.generator(GeneratorsList::GBold, i));
|
||||
h_bold.push(generators.generator(GeneratorsList::HBold, i));
|
||||
}
|
||||
let mut g_bold = PointVector(g_bold);
|
||||
let mut h_bold = PointVector(h_bold);
|
||||
|
||||
let mut y_inv = {
|
||||
let mut i = 1;
|
||||
let mut to_invert = vec![];
|
||||
while i < g_bold.len() {
|
||||
to_invert.push(y[i - 1]);
|
||||
i *= 2;
|
||||
}
|
||||
Scalar::batch_invert(&mut to_invert);
|
||||
to_invert
|
||||
};
|
||||
|
||||
// Check P has the expected relationship
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
let mut P_terms = witness
|
||||
.a
|
||||
.0
|
||||
.iter()
|
||||
.copied()
|
||||
.zip(g_bold.0.iter().copied())
|
||||
.chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
|
||||
.collect::<Vec<_>>();
|
||||
P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
|
||||
P_terms.push((witness.alpha, h));
|
||||
debug_assert_eq!(multiexp(&P_terms), P);
|
||||
P_terms.zeroize();
|
||||
}
|
||||
|
||||
let mut a = witness.a.clone();
|
||||
let mut b = witness.b.clone();
|
||||
let mut alpha = witness.alpha;
|
||||
|
||||
// From here on, g_bold.len() is used as n
|
||||
debug_assert_eq!(g_bold.len(), a.len());
|
||||
|
||||
let mut L_vec = vec![];
|
||||
let mut R_vec = vec![];
|
||||
|
||||
// else n > 1 case from figure 1
|
||||
while g_bold.len() > 1 {
|
||||
let (a1, a2) = a.clone().split();
|
||||
let (b1, b2) = b.clone().split();
|
||||
let (g_bold1, g_bold2) = g_bold.split();
|
||||
let (h_bold1, h_bold2) = h_bold.split();
|
||||
|
||||
let n_hat = g_bold1.len();
|
||||
debug_assert_eq!(a1.len(), n_hat);
|
||||
debug_assert_eq!(a2.len(), n_hat);
|
||||
debug_assert_eq!(b1.len(), n_hat);
|
||||
debug_assert_eq!(b2.len(), n_hat);
|
||||
debug_assert_eq!(g_bold1.len(), n_hat);
|
||||
debug_assert_eq!(g_bold2.len(), n_hat);
|
||||
debug_assert_eq!(h_bold1.len(), n_hat);
|
||||
debug_assert_eq!(h_bold2.len(), n_hat);
|
||||
|
||||
let y_n_hat = y[n_hat - 1];
|
||||
y.0.truncate(n_hat);
|
||||
|
||||
let d_l = Scalar::random(&mut *rng);
|
||||
let d_r = Scalar::random(&mut *rng);
|
||||
|
||||
let c_l = a1.clone().weighted_inner_product(&b2, &y);
|
||||
let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
|
||||
|
||||
let y_inv_n_hat = y_inv.pop().unwrap();
|
||||
|
||||
let mut L_terms = (a1.clone() * y_inv_n_hat)
|
||||
.0
|
||||
.drain(..)
|
||||
.zip(g_bold2.0.iter().copied())
|
||||
.chain(b2.0.iter().copied().zip(h_bold1.0.iter().copied()))
|
||||
.collect::<Vec<_>>();
|
||||
L_terms.push((c_l, g));
|
||||
L_terms.push((d_l, h));
|
||||
let L = multiexp(&L_terms) * INV_EIGHT();
|
||||
L_vec.push(L);
|
||||
L_terms.zeroize();
|
||||
|
||||
let mut R_terms = (a2.clone() * y_n_hat)
|
||||
.0
|
||||
.drain(..)
|
||||
.zip(g_bold1.0.iter().copied())
|
||||
.chain(b1.0.iter().copied().zip(h_bold2.0.iter().copied()))
|
||||
.collect::<Vec<_>>();
|
||||
R_terms.push((c_r, g));
|
||||
R_terms.push((d_r, h));
|
||||
let R = multiexp(&R_terms) * INV_EIGHT();
|
||||
R_vec.push(R);
|
||||
R_terms.zeroize();
|
||||
|
||||
let (e, inv_e, e_square, inv_e_square);
|
||||
(e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
|
||||
Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);
|
||||
|
||||
a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
|
||||
b = (b1 * inv_e) + &(b2 * e);
|
||||
alpha += (d_l * e_square) + (d_r * inv_e_square);
|
||||
|
||||
debug_assert_eq!(g_bold.len(), a.len());
|
||||
debug_assert_eq!(g_bold.len(), h_bold.len());
|
||||
debug_assert_eq!(g_bold.len(), b.len());
|
||||
}
|
||||
|
||||
// n == 1 case from figure 1
|
||||
debug_assert_eq!(g_bold.len(), 1);
|
||||
debug_assert_eq!(h_bold.len(), 1);
|
||||
|
||||
debug_assert_eq!(a.len(), 1);
|
||||
debug_assert_eq!(b.len(), 1);
|
||||
|
||||
let r = Scalar::random(&mut *rng);
|
||||
let s = Scalar::random(&mut *rng);
|
||||
let delta = Scalar::random(&mut *rng);
|
||||
let eta = Scalar::random(&mut *rng);
|
||||
|
||||
let ry = r * y[0];
|
||||
|
||||
let mut A_terms =
|
||||
vec![(r, g_bold[0]), (s, h_bold[0]), ((ry * b[0]) + (s * y[0] * a[0]), g), (delta, h)];
|
||||
let A = multiexp(&A_terms) * INV_EIGHT();
|
||||
A_terms.zeroize();
|
||||
|
||||
let mut B_terms = vec![(ry * s, g), (eta, h)];
|
||||
let B = multiexp(&B_terms) * INV_EIGHT();
|
||||
B_terms.zeroize();
|
||||
|
||||
let e = Self::transcript_A_B(&mut transcript, A, B);
|
||||
|
||||
let r_answer = r + (a[0] * e);
|
||||
let s_answer = s + (b[0] * e);
|
||||
let delta_answer = eta + (delta * e) + (alpha * (e * e));
|
||||
|
||||
Some(WipProof { L: L_vec, R: R_vec, A, B, r_answer, s_answer, delta_answer })
|
||||
}
|
||||
|
||||
pub(crate) fn verify<R: RngCore + CryptoRng>(
|
||||
self,
|
||||
rng: &mut R,
|
||||
verifier: &mut BulletproofsPlusBatchVerifier,
|
||||
mut transcript: Scalar,
|
||||
mut proof: WipProof,
|
||||
) -> bool {
|
||||
let verifier_weight = Scalar::random(rng);
|
||||
|
||||
let WipStatement { generators, P, y } = self;
|
||||
|
||||
// Verify the L/R lengths
|
||||
{
|
||||
let mut lr_len = 0;
|
||||
while (1 << lr_len) < generators.len() {
|
||||
lr_len += 1;
|
||||
}
|
||||
if (proof.L.len() != lr_len) ||
|
||||
(proof.R.len() != lr_len) ||
|
||||
(generators.len() != (1 << lr_len))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
let inv_y = {
|
||||
let inv_y = y[0].invert();
|
||||
let mut res = Vec::with_capacity(y.len());
|
||||
res.push(inv_y);
|
||||
while res.len() < y.len() {
|
||||
res.push(inv_y * res.last().unwrap());
|
||||
}
|
||||
res
|
||||
};
|
||||
|
||||
let mut e_is = Vec::with_capacity(proof.L.len());
|
||||
for (L, R) in proof.L.iter_mut().zip(proof.R.iter_mut()) {
|
||||
e_is.push(Self::transcript_L_R(&mut transcript, *L, *R));
|
||||
*L = L.mul_by_cofactor();
|
||||
*R = R.mul_by_cofactor();
|
||||
}
|
||||
|
||||
let e = Self::transcript_A_B(&mut transcript, proof.A, proof.B);
|
||||
proof.A = proof.A.mul_by_cofactor();
|
||||
proof.B = proof.B.mul_by_cofactor();
|
||||
let neg_e_square = verifier_weight * -(e * e);
|
||||
|
||||
verifier.0.other.push((neg_e_square, P));
|
||||
|
||||
let mut challenges = Vec::with_capacity(proof.L.len());
|
||||
let product_cache = {
|
||||
let mut inv_e_is = e_is.clone();
|
||||
Scalar::batch_invert(&mut inv_e_is);
|
||||
|
||||
debug_assert_eq!(e_is.len(), inv_e_is.len());
|
||||
debug_assert_eq!(e_is.len(), proof.L.len());
|
||||
debug_assert_eq!(e_is.len(), proof.R.len());
|
||||
for ((e_i, inv_e_i), (L, R)) in
|
||||
e_is.drain(..).zip(inv_e_is.drain(..)).zip(proof.L.iter().zip(proof.R.iter()))
|
||||
{
|
||||
debug_assert_eq!(e_i.invert(), inv_e_i);
|
||||
|
||||
challenges.push((e_i, inv_e_i));
|
||||
|
||||
let e_i_square = e_i * e_i;
|
||||
let inv_e_i_square = inv_e_i * inv_e_i;
|
||||
verifier.0.other.push((neg_e_square * e_i_square, *L));
|
||||
verifier.0.other.push((neg_e_square * inv_e_i_square, *R));
|
||||
}
|
||||
|
||||
challenge_products(&challenges)
|
||||
};
|
||||
|
||||
while verifier.0.g_bold.len() < generators.len() {
|
||||
verifier.0.g_bold.push(Scalar::ZERO);
|
||||
}
|
||||
while verifier.0.h_bold.len() < generators.len() {
|
||||
verifier.0.h_bold.push(Scalar::ZERO);
|
||||
}
|
||||
|
||||
let re = proof.r_answer * e;
|
||||
for i in 0 .. generators.len() {
|
||||
let mut scalar = product_cache[i] * re;
|
||||
if i > 0 {
|
||||
scalar *= inv_y[i - 1];
|
||||
}
|
||||
verifier.0.g_bold[i] += verifier_weight * scalar;
|
||||
}
|
||||
|
||||
let se = proof.s_answer * e;
|
||||
for i in 0 .. generators.len() {
|
||||
verifier.0.h_bold[i] += verifier_weight * (se * product_cache[product_cache.len() - 1 - i]);
|
||||
}
|
||||
|
||||
verifier.0.other.push((verifier_weight * -e, proof.A));
|
||||
verifier.0.g += verifier_weight * (proof.r_answer * y[0] * proof.s_answer);
|
||||
verifier.0.h += verifier_weight * proof.delta_answer;
|
||||
verifier.0.other.push((-verifier_weight, proof.B));
|
||||
|
||||
true
|
||||
}
|
||||
}
|
||||
59
networks/monero/ringct/bulletproofs/src/point_vector.rs
Normal file
@@ -0,0 +1,59 @@
|
||||
use core::ops::{Index, IndexMut};
|
||||
use std_shims::vec::Vec;
|
||||
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use curve25519_dalek::edwards::EdwardsPoint;
|
||||
|
||||
use crate::scalar_vector::ScalarVector;
|
||||
|
||||
#[cfg(test)]
|
||||
use crate::core::multiexp;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub(crate) struct PointVector(pub(crate) Vec<EdwardsPoint>);
|
||||
|
||||
impl Index<usize> for PointVector {
|
||||
type Output = EdwardsPoint;
|
||||
fn index(&self, index: usize) -> &EdwardsPoint {
|
||||
&self.0[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexMut<usize> for PointVector {
|
||||
fn index_mut(&mut self, index: usize) -> &mut EdwardsPoint {
|
||||
&mut self.0[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl PointVector {
|
||||
pub(crate) fn mul_vec(&self, vector: &ScalarVector) -> Self {
|
||||
assert_eq!(self.len(), vector.len());
|
||||
let mut res = self.clone();
|
||||
for (i, val) in res.0.iter_mut().enumerate() {
|
||||
*val *= vector.0[i];
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn multiexp(&self, vector: &ScalarVector) -> EdwardsPoint {
|
||||
debug_assert_eq!(self.len(), vector.len());
|
||||
let mut res = Vec::with_capacity(self.len());
|
||||
for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) {
|
||||
res.push((scalar, point));
|
||||
}
|
||||
multiexp(&res)
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub(crate) fn split(mut self) -> (Self, Self) {
|
||||
debug_assert!(self.len() > 1);
|
||||
let r = self.0.split_off(self.0.len() / 2);
|
||||
debug_assert_eq!(self.len(), r.len());
|
||||
(self, PointVector(r))
|
||||
}
|
||||
}
|
||||
138
networks/monero/ringct/bulletproofs/src/scalar_vector.rs
Normal file
@@ -0,0 +1,138 @@
|
||||
use core::{
|
||||
borrow::Borrow,
|
||||
ops::{Index, IndexMut, Add, Sub, Mul},
|
||||
};
|
||||
use std_shims::{vec, vec::Vec};
|
||||
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use crate::core::multiexp;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
|
||||
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
|
||||
|
||||
impl Index<usize> for ScalarVector {
|
||||
type Output = Scalar;
|
||||
fn index(&self, index: usize) -> &Scalar {
|
||||
&self.0[index]
|
||||
}
|
||||
}
|
||||
impl IndexMut<usize> for ScalarVector {
|
||||
fn index_mut(&mut self, index: usize) -> &mut Scalar {
|
||||
&mut self.0[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn add(mut self, scalar: S) -> ScalarVector {
|
||||
for s in &mut self.0 {
|
||||
*s += scalar.borrow();
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn sub(mut self, scalar: S) -> ScalarVector {
|
||||
for s in &mut self.0 {
|
||||
*s -= scalar.borrow();
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn mul(mut self, scalar: S) -> ScalarVector {
|
||||
for s in &mut self.0 {
|
||||
*s *= scalar.borrow();
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<&ScalarVector> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn add(mut self, other: &ScalarVector) -> ScalarVector {
|
||||
debug_assert_eq!(self.len(), other.len());
|
||||
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
|
||||
*s += o;
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
impl Sub<&ScalarVector> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn sub(mut self, other: &ScalarVector) -> ScalarVector {
|
||||
debug_assert_eq!(self.len(), other.len());
|
||||
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
|
||||
*s -= o;
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
impl Mul<&ScalarVector> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn mul(mut self, other: &ScalarVector) -> ScalarVector {
|
||||
debug_assert_eq!(self.len(), other.len());
|
||||
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
|
||||
*s *= o;
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Mul<&[EdwardsPoint]> for &ScalarVector {
|
||||
type Output = EdwardsPoint;
|
||||
fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
|
||||
debug_assert_eq!(self.len(), b.len());
|
||||
let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
|
||||
let res = multiexp(&multiexp_args);
|
||||
multiexp_args.zeroize();
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl ScalarVector {
|
||||
pub(crate) fn new(len: usize) -> Self {
|
||||
ScalarVector(vec![Scalar::ZERO; len])
|
||||
}
|
||||
|
||||
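// powers(x, len) = [1, x, x^2, ..., x^(len - 1)]. `x` is pushed unconditionally, so the
// truncate below trims the extra element in the len == 1 case.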
pub(crate) fn powers(x: Scalar, len: usize) -> Self {
|
||||
debug_assert!(len != 0);
|
||||
|
||||
let mut res = Vec::with_capacity(len);
|
||||
res.push(Scalar::ONE);
|
||||
res.push(x);
|
||||
for i in 2 .. len {
|
||||
res.push(res[i - 1] * x);
|
||||
}
|
||||
res.truncate(len);
|
||||
ScalarVector(res)
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub(crate) fn sum(mut self) -> Scalar {
|
||||
self.0.drain(..).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
|
||||
(self * vector).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
|
||||
(self * vector * y).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn split(mut self) -> (Self, Self) {
|
||||
debug_assert!(self.len() > 1);
|
||||
let r = self.0.split_off(self.0.len() / 2);
|
||||
debug_assert_eq!(self.len(), r.len());
|
||||
(self, ScalarVector(r))
|
||||
}
|
||||
}
|
||||
56
networks/monero/ringct/bulletproofs/src/tests/mod.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
|
||||
use monero_primitives::Commitment;
|
||||
use crate::{batch_verifier::BatchVerifier, Bulletproof, BulletproofError};
|
||||
|
||||
mod original;
|
||||
mod plus;
|
||||
|
||||
macro_rules! bulletproofs_tests {
|
||||
($name: ident, $max: ident, $plus: literal) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
// Create Bulletproofs for all possible output quantities
|
||||
let mut verifier = BatchVerifier::new();
|
||||
for i in 1 ..= 16 {
|
||||
let commitments = (1 ..= i)
|
||||
.map(|_| Commitment::new(Scalar::random(&mut OsRng), OsRng.next_u64()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let bp = if $plus {
|
||||
Bulletproof::prove_plus(&mut OsRng, commitments.clone()).unwrap()
|
||||
} else {
|
||||
Bulletproof::prove(&mut OsRng, commitments.clone()).unwrap()
|
||||
};
|
||||
|
||||
let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
|
||||
assert!(bp.verify(&mut OsRng, &commitments));
|
||||
assert!(bp.batch_verify(&mut OsRng, &mut verifier, &commitments));
|
||||
}
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn $max() {
|
||||
// Check Bulletproofs errors if we try to prove for too many outputs
|
||||
let mut commitments = vec![];
|
||||
for _ in 0 .. 17 {
|
||||
commitments.push(Commitment::new(Scalar::ZERO, 0));
|
||||
}
|
||||
assert_eq!(
|
||||
(if $plus {
|
||||
Bulletproof::prove_plus(&mut OsRng, commitments)
|
||||
} else {
|
||||
Bulletproof::prove(&mut OsRng, commitments)
|
||||
})
|
||||
.unwrap_err(),
|
||||
BulletproofError::TooManyCommitments,
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
bulletproofs_tests!(bulletproofs, bulletproofs_max, false);
|
||||
bulletproofs_tests!(bulletproofs_plus, bulletproofs_plus_max, true);
|
||||
@@ -0,0 +1,75 @@
|
||||
// The inner product relation is P = sum(g_bold * a, h_bold * b, g * (a * b))
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use curve25519_dalek::Scalar;
|
||||
|
||||
use monero_generators::H;
|
||||
|
||||
use crate::{
|
||||
scalar_vector::ScalarVector,
|
||||
point_vector::PointVector,
|
||||
original::{
|
||||
GENERATORS,
|
||||
inner_product::{IpStatement, IpWitness},
|
||||
},
|
||||
BulletproofsBatchVerifier,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_zero_inner_product() {
|
||||
let statement =
|
||||
IpStatement::new_without_P_transcript(ScalarVector(vec![Scalar::ONE; 1]), Scalar::ONE);
|
||||
let witness = IpWitness::new(ScalarVector::new(1), ScalarVector::new(1)).unwrap();
|
||||
|
||||
let transcript = Scalar::random(&mut OsRng);
|
||||
let proof = statement.clone().prove(transcript, witness).unwrap();
|
||||
|
||||
let mut verifier = BulletproofsBatchVerifier::default();
|
||||
verifier.0.g_bold = vec![Scalar::ZERO; 1];
|
||||
verifier.0.h_bold = vec![Scalar::ZERO; 1];
|
||||
statement.verify(&mut verifier, 1, transcript, Scalar::random(&mut OsRng), proof).unwrap();
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inner_product() {
|
||||
// P = sum(g_bold * a, h_bold * b, g * u * <a, b>)
|
||||
let generators = GENERATORS();
|
||||
let mut verifier = BulletproofsBatchVerifier::default();
|
||||
verifier.0.g_bold = vec![Scalar::ZERO; 32];
|
||||
verifier.0.h_bold = vec![Scalar::ZERO; 32];
|
||||
for i in [1, 2, 4, 8, 16, 32] {
|
||||
let g = H();
|
||||
let mut g_bold = vec![];
|
||||
let mut h_bold = vec![];
|
||||
for i in 0 .. i {
|
||||
g_bold.push(generators.G[i]);
|
||||
h_bold.push(generators.H[i]);
|
||||
}
|
||||
let g_bold = PointVector(g_bold);
|
||||
let h_bold = PointVector(h_bold);
|
||||
|
||||
let mut a = ScalarVector::new(i);
|
||||
let mut b = ScalarVector::new(i);
|
||||
|
||||
for i in 0 .. i {
|
||||
a[i] = Scalar::random(&mut OsRng);
|
||||
b[i] = Scalar::random(&mut OsRng);
|
||||
}
|
||||
|
||||
let P = g_bold.multiexp(&a) + h_bold.multiexp(&b) + (g * a.clone().inner_product(&b));
|
||||
|
||||
let statement =
|
||||
IpStatement::new_without_P_transcript(ScalarVector(vec![Scalar::ONE; i]), Scalar::ONE);
|
||||
let witness = IpWitness::new(a, b).unwrap();
|
||||
|
||||
let transcript = Scalar::random(&mut OsRng);
|
||||
let proof = statement.clone().prove(transcript, witness).unwrap();
|
||||
|
||||
let weight = Scalar::random(&mut OsRng);
|
||||
verifier.0.other.push((weight, P));
|
||||
statement.verify(&mut verifier, i, transcript, weight, proof).unwrap();
|
||||
}
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
use hex_literal::hex;
|
||||
use rand_core::OsRng;
|
||||
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
|
||||
use monero_io::decompress_point;
|
||||
|
||||
use crate::{
|
||||
original::{IpProof, AggregateRangeProof as OriginalProof},
|
||||
Bulletproof,
|
||||
};
|
||||
|
||||
mod inner_product;
|
||||
|
||||
#[test]
|
||||
fn bulletproofs_vector() {
|
||||
let scalar = |scalar| Scalar::from_canonical_bytes(scalar).unwrap();
|
||||
let point = |point| decompress_point(point).unwrap();
|
||||
|
||||
// Generated from Monero
|
||||
assert!(Bulletproof::Original(OriginalProof {
|
||||
A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
|
||||
S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
|
||||
T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
|
||||
T2: point(hex!("b8390aa4b60b255630d40e592f55ec6b7ab5e3a96bfcdcd6f1cd1d2fc95f441e")),
|
||||
tau_x: scalar(hex!("5957dba8ea9afb23d6e81cc048a92f2d502c10c749dc1b2bd148ae8d41ec7107")),
|
||||
mu: scalar(hex!("923023b234c2e64774b820b4961f7181f6c1dc152c438643e5a25b0bf271bc02")),
|
||||
ip: IpProof {
|
||||
L: vec![
|
||||
point(hex!("c45f656316b9ebf9d357fb6a9f85b5f09e0b991dd50a6e0ae9b02de3946c9d99")),
|
||||
point(hex!("9304d2bf0f27183a2acc58cc755a0348da11bd345485fda41b872fee89e72aac")),
|
||||
point(hex!("1bb8b71925d155dd9569f64129ea049d6149fdc4e7a42a86d9478801d922129b")),
|
||||
point(hex!("5756a7bf887aa72b9a952f92f47182122e7b19d89e5dd434c747492b00e1c6b7")),
|
||||
point(hex!("6e497c910d102592830555356af5ff8340e8d141e3fb60ea24cfa587e964f07d")),
|
||||
point(hex!("f4fa3898e7b08e039183d444f3d55040f3c790ed806cb314de49f3068bdbb218")),
|
||||
point(hex!("0bbc37597c3ead517a3841e159c8b7b79a5ceaee24b2a9a20350127aab428713")),
|
||||
],
|
||||
R: vec![
|
||||
point(hex!("609420ba1702781692e84accfd225adb3d077aedc3cf8125563400466b52dbd9")),
|
||||
point(hex!("fb4e1d079e7a2b0ec14f7e2a3943bf50b6d60bc346a54fcf562fb234b342abf8")),
|
||||
point(hex!("6ae3ac97289c48ce95b9c557289e82a34932055f7f5e32720139824fe81b12e5")),
|
||||
point(hex!("d071cc2ffbdab2d840326ad15f68c01da6482271cae3cf644670d1632f29a15c")),
|
||||
point(hex!("e52a1754b95e1060589ba7ce0c43d0060820ebfc0d49dc52884bc3c65ad18af5")),
|
||||
point(hex!("41573b06140108539957df71aceb4b1816d2409ce896659aa5c86f037ca5e851")),
|
||||
point(hex!("a65970b2cc3c7b08b2b5b739dbc8e71e646783c41c625e2a5b1535e3d2e0f742")),
|
||||
],
|
||||
a: scalar(hex!("0077c5383dea44d3cd1bc74849376bd60679612dc4b945255822457fa0c0a209")),
|
||||
b: scalar(hex!("fe80cf5756473482581e1d38644007793ddc66fdeb9404ec1689a907e4863302")),
|
||||
},
|
||||
t_hat: scalar(hex!("40dfb08e09249040df997851db311bd6827c26e87d6f0f332c55be8eef10e603"))
|
||||
})
|
||||
.verify(
|
||||
&mut OsRng,
|
||||
&[
|
||||
// For some reason, these vectors are * INV_EIGHT
|
||||
point(hex!("8e8f23f315edae4f6c2f948d9a861e0ae32d356b933cd11d2f0e031ac744c41f"))
|
||||
.mul_by_cofactor(),
|
||||
point(hex!("2829cbd025aa54cd6e1b59a032564f22f0b2e5627f7f2c4297f90da438b5510f"))
|
||||
.mul_by_cofactor(),
|
||||
]
|
||||
));
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use curve25519_dalek::Scalar;
|
||||
|
||||
use monero_primitives::Commitment;
|
||||
|
||||
use crate::{
|
||||
batch_verifier::BulletproofsPlusBatchVerifier,
|
||||
plus::aggregate_range_proof::{AggregateRangeStatement, AggregateRangeWitness},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_range_proof() {
|
||||
let mut verifier = BulletproofsPlusBatchVerifier::default();
|
||||
for m in 1 ..= 16 {
|
||||
let mut commitments = vec![];
|
||||
for _ in 0 .. m {
|
||||
commitments.push(Commitment::new(Scalar::random(&mut OsRng), OsRng.next_u64()));
|
||||
}
|
||||
let commitment_points = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
|
||||
let statement = AggregateRangeStatement::new(&commitment_points).unwrap();
|
||||
let witness = AggregateRangeWitness::new(commitments).unwrap();
|
||||
|
||||
let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
|
||||
statement.verify(&mut OsRng, &mut verifier, proof);
|
||||
}
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
@@ -0,0 +1,4 @@
#[cfg(test)]
mod weighted_inner_product;
#[cfg(test)]
mod aggregate_range_proof;
@@ -0,0 +1,82 @@
|
||||
// The inner product relation is P = sum(g_bold * a, h_bold * b, g * (a * y * b), h * alpha)
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use crate::{
|
||||
batch_verifier::BulletproofsPlusBatchVerifier,
|
||||
plus::{
|
||||
ScalarVector, PointVector, GeneratorsList, BpPlusGenerators,
|
||||
weighted_inner_product::{WipStatement, WipWitness},
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_zero_weighted_inner_product() {
|
||||
#[allow(non_snake_case)]
|
||||
let P = EdwardsPoint::identity();
|
||||
let y = Scalar::random(&mut OsRng);
|
||||
|
||||
let generators = BpPlusGenerators::new().reduce(1);
|
||||
let statement = WipStatement::new(generators, P, y);
|
||||
let witness = WipWitness::new(ScalarVector::new(1), ScalarVector::new(1), Scalar::ZERO).unwrap();
|
||||
|
||||
let transcript = Scalar::random(&mut OsRng);
|
||||
let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
|
||||
|
||||
let mut verifier = BulletproofsPlusBatchVerifier::default();
|
||||
statement.verify(&mut OsRng, &mut verifier, transcript, proof);
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_weighted_inner_product() {
|
||||
// P = sum(g_bold * a, h_bold * b, g * (a * y * b), h * alpha)
|
||||
let mut verifier = BulletproofsPlusBatchVerifier::default();
|
||||
let generators = BpPlusGenerators::new();
|
||||
for i in [1, 2, 4, 8, 16, 32] {
|
||||
let generators = generators.reduce(i);
|
||||
let g = BpPlusGenerators::g();
|
||||
let h = BpPlusGenerators::h();
|
||||
assert_eq!(generators.len(), i);
|
||||
let mut g_bold = vec![];
|
||||
let mut h_bold = vec![];
|
||||
for i in 0 .. i {
|
||||
g_bold.push(generators.generator(GeneratorsList::GBold, i));
|
||||
h_bold.push(generators.generator(GeneratorsList::HBold, i));
|
||||
}
|
||||
let g_bold = PointVector(g_bold);
|
||||
let h_bold = PointVector(h_bold);
|
||||
|
||||
let mut a = ScalarVector::new(i);
|
||||
let mut b = ScalarVector::new(i);
|
||||
let alpha = Scalar::random(&mut OsRng);
|
||||
|
||||
let y = Scalar::random(&mut OsRng);
|
||||
let mut y_vec = ScalarVector::new(g_bold.len());
|
||||
y_vec[0] = y;
|
||||
for i in 1 .. y_vec.len() {
|
||||
y_vec[i] = y_vec[i - 1] * y;
|
||||
}
|
||||
|
||||
for i in 0 .. i {
|
||||
a[i] = Scalar::random(&mut OsRng);
|
||||
b[i] = Scalar::random(&mut OsRng);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
let P = g_bold.multiexp(&a) +
|
||||
h_bold.multiexp(&b) +
|
||||
(g * a.clone().weighted_inner_product(&b, &y_vec)) +
|
||||
(h * alpha);
|
||||
|
||||
let statement = WipStatement::new(generators, P, y);
|
||||
let witness = WipWitness::new(a, b, alpha).unwrap();
|
||||
|
||||
let transcript = Scalar::random(&mut OsRng);
|
||||
let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
|
||||
statement.verify(&mut OsRng, &mut verifier, transcript, proof);
|
||||
}
|
||||
assert!(verifier.verify());
|
||||
}
|
||||
65
networks/monero/ringct/clsag/Cargo.toml
Normal file
@@ -0,0 +1,65 @@
|
||||
[package]
|
||||
name = "monero-clsag"
|
||||
version = "0.1.0"
|
||||
description = "The CLSAG linkable ring signature, as defined by the Monero protocol"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/ringct/clsag"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.79"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false }
|
||||
|
||||
thiserror = { version = "1", default-features = false, optional = true }
|
||||
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
|
||||
subtle = { version = "^2.4", default-features = false }
|
||||
|
||||
# Cryptographic dependencies
|
||||
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }
|
||||
|
||||
# Multisig dependencies
|
||||
rand_chacha = { version = "0.3", default-features = false, optional = true }
|
||||
transcript = { package = "flexible-transcript", path = "../../../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
|
||||
group = { version = "0.13", default-features = false, optional = true }
|
||||
dalek-ff-group = { path = "../../../../crypto/dalek-ff-group", version = "0.4", default-features = false, optional = true }
|
||||
frost = { package = "modular-frost", path = "../../../../crypto/frost", default-features = false, features = ["ed25519"], optional = true }
|
||||
|
||||
# Other Monero dependencies
|
||||
monero-io = { path = "../../io", version = "0.1", default-features = false }
|
||||
monero-generators = { path = "../../generators", version = "0.4", default-features = false }
|
||||
monero-primitives = { path = "../../primitives", version = "0.1", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
frost = { package = "modular-frost", path = "../../../../crypto/frost", default-features = false, features = ["ed25519", "tests"] }
|
||||
|
||||
[features]
|
||||
std = [
|
||||
"std-shims/std",
|
||||
|
||||
"thiserror",
|
||||
|
||||
"rand_core/std",
|
||||
"zeroize/std",
|
||||
"subtle/std",
|
||||
|
||||
"rand_chacha?/std",
|
||||
"transcript?/std",
|
||||
"group?/alloc",
|
||||
"dalek-ff-group?/std",
|
||||
|
||||
"monero-io/std",
|
||||
"monero-generators/std",
|
||||
"monero-primitives/std",
|
||||
]
|
||||
multisig = ["rand_chacha", "transcript", "group", "dalek-ff-group", "frost", "std"]
|
||||
default = ["std"]
|
||||
21
networks/monero/ringct/clsag/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022-2024 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
15
networks/monero/ringct/clsag/README.md
Normal file
@@ -0,0 +1,15 @@
# Monero CLSAG

The CLSAG linkable ring signature, as defined by the Monero protocol.

Additionally included is a FROST-inspired threshold multisignature algorithm.

This library is usable under no-std when the `std` feature (on by default) is
disabled.

### Cargo Features

- `std` (on by default): Enables `std` (and with it, more efficient internal
  implementations).
- `multisig`: Provides a FROST-inspired threshold multisignature algorithm for
  use.
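
A minimal single-input signing sketch follows. This is an illustrative assumption of how the
pieces fit together, not canonical usage: `sign_one_input` is a hypothetical helper, the decoy
selection, commitment opening, and output mask sum are all produced by the surrounding wallet
code, and `rand_core`'s `OsRng` assumes its `getrandom` feature is enabled.

```rust
use rand_core::OsRng;
use zeroize::Zeroizing;
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use monero_primitives::{Commitment, Decoys};
use monero_clsag::{ClsagError, ClsagContext, Clsag};

// Sign one input whose ring (`decoys`) and commitment opening were selected elsewhere.
fn sign_one_input(
  key: Zeroizing<Scalar>,
  decoys: Decoys,
  commitment: Commitment,
  sum_outputs: Scalar,
  msg: [u8; 32],
) -> Result<Vec<(Clsag, EdwardsPoint)>, ClsagError> {
  let context = ClsagContext::new(decoys, commitment)?;
  Clsag::sign(&mut OsRng, vec![(key, context)], sum_outputs, msg)
}
```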
400
networks/monero/ringct/clsag/src/lib.rs
Normal file
@@ -0,0 +1,400 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
use core::ops::Deref;
|
||||
use std_shims::{
|
||||
vec,
|
||||
vec::Vec,
|
||||
io::{self, Read, Write},
|
||||
};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
|
||||
use subtle::{ConstantTimeEq, ConditionallySelectable};
|
||||
|
||||
use curve25519_dalek::{
|
||||
constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
|
||||
scalar::Scalar,
|
||||
traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
|
||||
edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
|
||||
};
|
||||
|
||||
use monero_io::*;
|
||||
use monero_generators::hash_to_point;
|
||||
use monero_primitives::{INV_EIGHT, G_PRECOMP, Commitment, Decoys, keccak256_to_scalar};
|
||||
|
||||
#[cfg(feature = "multisig")]
|
||||
mod multisig;
|
||||
#[cfg(feature = "multisig")]
|
||||
pub use multisig::{ClsagMultisigMaskSender, ClsagAddendum, ClsagMultisig};
|
||||
|
||||
#[cfg(all(feature = "std", test))]
|
||||
mod tests;
|
||||
|
||||
/// Errors when working with CLSAGs.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
#[cfg_attr(feature = "std", derive(thiserror::Error))]
|
||||
pub enum ClsagError {
|
||||
/// The ring was invalid (such as being too small or too large).
|
||||
#[cfg_attr(feature = "std", error("invalid ring"))]
|
||||
InvalidRing,
|
||||
/// The discrete logarithm of the key, scaling G, wasn't equivalent to the signing ring member.
|
||||
#[cfg_attr(feature = "std", error("invalid commitment"))]
|
||||
InvalidKey,
|
||||
/// The commitment opening provided did not match the ring member's.
|
||||
#[cfg_attr(feature = "std", error("invalid commitment"))]
|
||||
InvalidCommitment,
|
||||
/// The key image was invalid (such as being identity or torsioned)
|
||||
#[cfg_attr(feature = "std", error("invalid key image"))]
|
||||
InvalidImage,
|
||||
/// The `D` component was invalid.
|
||||
#[cfg_attr(feature = "std", error("invalid D"))]
|
||||
InvalidD,
|
||||
/// The `s` vector was invalid.
|
||||
#[cfg_attr(feature = "std", error("invalid s"))]
|
||||
InvalidS,
|
||||
/// The `c1` variable was invalid.
|
||||
#[cfg_attr(feature = "std", error("invalid c1"))]
|
||||
InvalidC1,
|
||||
}
|
||||
|
||||
/// Context on the input being signed for.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
|
||||
pub struct ClsagContext {
|
||||
// The opening for the commitment of the signing ring member
|
||||
commitment: Commitment,
|
||||
// Selected ring members' positions, signer index, and ring
|
||||
decoys: Decoys,
|
||||
}
|
||||
|
||||
impl ClsagContext {
|
||||
/// Create a new context, as necessary for signing.
|
||||
pub fn new(decoys: Decoys, commitment: Commitment) -> Result<ClsagContext, ClsagError> {
|
||||
if decoys.len() > u8::MAX.into() {
|
||||
Err(ClsagError::InvalidRing)?;
|
||||
}
|
||||
|
||||
// Validate the commitment matches
|
||||
if decoys.signer_ring_members()[1] != commitment.calculate() {
|
||||
Err(ClsagError::InvalidCommitment)?;
|
||||
}
|
||||
|
||||
Ok(ClsagContext { commitment, decoys })
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
enum Mode {
|
||||
Sign(usize, EdwardsPoint, EdwardsPoint),
|
||||
Verify(Scalar),
|
||||
}
|
||||
|
||||
// Core of the CLSAG algorithm, applicable to both sign and verify with minimal differences
|
||||
//
|
||||
// Said differences are covered via the above Mode
|
||||
fn core(
|
||||
ring: &[[EdwardsPoint; 2]],
|
||||
I: &EdwardsPoint,
|
||||
pseudo_out: &EdwardsPoint,
|
||||
msg: &[u8; 32],
|
||||
D: &EdwardsPoint,
|
||||
s: &[Scalar],
|
||||
A_c1: &Mode,
|
||||
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
|
||||
let n = ring.len();
|
||||
|
||||
let images_precomp = match A_c1 {
|
||||
Mode::Sign(..) => None,
|
||||
Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
|
||||
};
|
||||
let D_INV_EIGHT = D * INV_EIGHT();
|
||||
|
||||
// Generate the transcript
|
||||
// Instead of generating multiple, a single transcript is created and then edited as needed
|
||||
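// The buffer layout is a 32-byte domain tag ("CLSAG_agg_0", zero-padded), then every ring
// member's key, then every ring member's commitment, then I, D * INV_EIGHT, and pseudo_out.
// Hashing it yields mu_P; flipping the tag to "CLSAG_agg_1" and rehashing yields mu_C.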
const PREFIX: &[u8] = b"CLSAG_";
|
||||
#[rustfmt::skip]
|
||||
const AGG_0: &[u8] = b"agg_0";
|
||||
#[rustfmt::skip]
|
||||
const ROUND: &[u8] = b"round";
|
||||
const PREFIX_AGG_0_LEN: usize = PREFIX.len() + AGG_0.len();
|
||||
|
||||
let mut to_hash = Vec::with_capacity(((2 * n) + 5) * 32);
|
||||
to_hash.extend(PREFIX);
|
||||
to_hash.extend(AGG_0);
|
||||
to_hash.extend([0; 32 - PREFIX_AGG_0_LEN]);
|
||||
|
||||
let mut P = Vec::with_capacity(n);
|
||||
for member in ring {
|
||||
P.push(member[0]);
|
||||
to_hash.extend(member[0].compress().to_bytes());
|
||||
}
|
||||
|
||||
let mut C = Vec::with_capacity(n);
|
||||
for member in ring {
|
||||
C.push(member[1] - pseudo_out);
|
||||
to_hash.extend(member[1].compress().to_bytes());
|
||||
}
|
||||
|
||||
to_hash.extend(I.compress().to_bytes());
|
||||
to_hash.extend(D_INV_EIGHT.compress().to_bytes());
|
||||
to_hash.extend(pseudo_out.compress().to_bytes());
|
||||
// mu_P with agg_0
|
||||
let mu_P = keccak256_to_scalar(&to_hash);
|
||||
// mu_C with agg_1
|
||||
to_hash[PREFIX_AGG_0_LEN - 1] = b'1';
|
||||
let mu_C = keccak256_to_scalar(&to_hash);
|
||||
|
||||
// Truncate it for the round transcript, altering the DST as needed
|
||||
to_hash.truncate(((2 * n) + 1) * 32);
|
||||
for i in 0 .. ROUND.len() {
|
||||
to_hash[PREFIX.len() + i] = ROUND[i];
|
||||
}
|
||||
// Unfortunately, it's I D pseudo_out instead of pseudo_out I D, meaning this needs to be
|
||||
// truncated just to add it back
|
||||
to_hash.extend(pseudo_out.compress().to_bytes());
|
||||
to_hash.extend(msg);
|
||||
|
||||
// Configure the loop based on if we're signing or verifying
|
||||
let start;
|
||||
let end;
|
||||
let mut c;
|
||||
match A_c1 {
|
||||
Mode::Sign(r, A, AH) => {
|
||||
start = r + 1;
|
||||
end = r + n;
|
||||
to_hash.extend(A.compress().to_bytes());
|
||||
to_hash.extend(AH.compress().to_bytes());
|
||||
c = keccak256_to_scalar(&to_hash);
|
||||
}
|
||||
|
||||
Mode::Verify(c1) => {
|
||||
start = 0;
|
||||
end = n;
|
||||
c = *c1;
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the core loop
|
||||
let mut c1 = c;
|
||||
for i in (start .. end).map(|i| i % n) {
|
||||
let c_p = mu_P * c;
|
||||
let c_c = mu_C * c;
|
||||
|
||||
// (s_i * G) + (c_p * P_i) + (c_c * C_i)
|
||||
let L = match A_c1 {
|
||||
Mode::Sign(..) => {
|
||||
EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
|
||||
}
|
||||
Mode::Verify(..) => {
|
||||
G_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
|
||||
}
|
||||
};
|
||||
|
||||
let PH = hash_to_point(P[i].compress().0);
|
||||
|
||||
// (c_p * I) + (c_c * D) + (s_i * PH)
|
||||
let R = match A_c1 {
|
||||
Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
|
||||
Mode::Verify(..) => {
|
||||
images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
|
||||
}
|
||||
};
|
||||
|
||||
to_hash.truncate(((2 * n) + 3) * 32);
|
||||
to_hash.extend(L.compress().to_bytes());
|
||||
to_hash.extend(R.compress().to_bytes());
|
||||
c = keccak256_to_scalar(&to_hash);
|
||||
|
||||
// This will only execute once and shouldn't need to be constant time. Making it constant time
|
||||
// removes the risk of branch prediction creating timing differences depending on ring index
|
||||
// however
|
||||
c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
|
||||
}
|
||||
|
||||
// This first tuple is needed to continue signing, the latter is the c to be tested/worked with
|
||||
((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
|
||||
}
|
||||
|
||||
/// The CLSAG signature, as used in Monero.
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Clsag {
|
||||
/// The difference of the commitment randomnesses, scaling the key image generator.
|
||||
pub D: EdwardsPoint,
|
||||
/// The responses for each ring member.
|
||||
pub s: Vec<Scalar>,
|
||||
/// The first challenge in the ring.
|
||||
pub c1: Scalar,
|
||||
}
|
||||
|
||||
struct ClsagSignCore {
|
||||
incomplete_clsag: Clsag,
|
||||
pseudo_out: EdwardsPoint,
|
||||
key_challenge: Scalar,
|
||||
challenged_mask: Scalar,
|
||||
}
|
||||
|
||||
impl Clsag {
|
||||
// Sign core is the extension of core as needed for signing, yet is shared between single signer
|
||||
// and multisig, hence why it's still core
|
||||
fn sign_core<R: RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
I: &EdwardsPoint,
|
||||
input: &ClsagContext,
|
||||
mask: Scalar,
|
||||
msg: &[u8; 32],
|
||||
A: EdwardsPoint,
|
||||
AH: EdwardsPoint,
|
||||
) -> ClsagSignCore {
|
||||
let r: usize = input.decoys.signer_index().into();
|
||||
|
||||
let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
|
||||
let mask_delta = input.commitment.mask - mask;
|
||||
|
||||
let H = hash_to_point(input.decoys.ring()[r][0].compress().0);
|
||||
let D = H * mask_delta;
|
||||
let mut s = Vec::with_capacity(input.decoys.ring().len());
|
||||
for _ in 0 .. input.decoys.ring().len() {
|
||||
s.push(Scalar::random(rng));
|
||||
}
|
||||
let ((D, c_p, c_c), c1) =
|
||||
core(input.decoys.ring(), I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));
|
||||
|
||||
ClsagSignCore {
|
||||
incomplete_clsag: Clsag { D, s, c1 },
|
||||
pseudo_out,
|
||||
key_challenge: c_p,
|
||||
challenged_mask: c_c * mask_delta,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sign CLSAG signatures for the provided inputs.
|
||||
///
|
||||
/// Monero ensures the rerandomized input commitments have the same value as the outputs by
|
||||
/// checking `sum(rerandomized_input_commitments) - sum(output_commitments) == 0`. This requires
|
||||
/// not only that the amounts balance, yet also that
/// `sum(input_commitment_masks) - sum(output_commitment_masks) == 0`.
|
||||
///
|
||||
/// Monero solves this by following the wallet protocol to determine each output commitment's
|
||||
/// randomness, then using random masks for all but the last input. The last input is
|
||||
/// rerandomized to the necessary mask for the equation to balance.
|
||||
///
|
||||
/// Due to Monero having this behavior, it only makes sense to sign CLSAGs as a list, hence this
|
||||
/// API being the way it is.
|
||||
///
|
||||
/// `inputs` is of the form (discrete logarithm of the key, context).
|
||||
///
|
||||
  /// `sum_outputs` is the sum of the output commitments' masks.
|
||||
pub fn sign<R: RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
mut inputs: Vec<(Zeroizing<Scalar>, ClsagContext)>,
|
||||
sum_outputs: Scalar,
|
||||
msg: [u8; 32],
|
||||
) -> Result<Vec<(Clsag, EdwardsPoint)>, ClsagError> {
|
||||
// Create the key images
|
||||
let mut key_image_generators = vec![];
|
||||
let mut key_images = vec![];
|
||||
for input in &inputs {
|
||||
let key = input.1.decoys.signer_ring_members()[0];
|
||||
|
||||
// Check the key is consistent
|
||||
if (ED25519_BASEPOINT_TABLE * input.0.deref()) != key {
|
||||
Err(ClsagError::InvalidKey)?;
|
||||
}
|
||||
|
||||
let key_image_generator = hash_to_point(key.compress().0);
|
||||
key_image_generators.push(key_image_generator);
|
||||
key_images.push(key_image_generator * input.0.deref());
|
||||
}
|
||||
|
||||
let mut res = Vec::with_capacity(inputs.len());
|
||||
let mut sum_pseudo_outs = Scalar::ZERO;
|
||||
for i in 0 .. inputs.len() {
|
||||
let mask;
|
||||
// If this is the last input, set the mask as described above
|
||||
if i == (inputs.len() - 1) {
|
||||
mask = sum_outputs - sum_pseudo_outs;
|
||||
} else {
|
||||
mask = Scalar::random(rng);
|
||||
sum_pseudo_outs += mask;
|
||||
}
|
||||
|
||||
let mut nonce = Zeroizing::new(Scalar::random(rng));
|
||||
let ClsagSignCore { mut incomplete_clsag, pseudo_out, key_challenge, challenged_mask } =
|
||||
Clsag::sign_core(
|
||||
rng,
|
||||
&key_images[i],
|
||||
&inputs[i].1,
|
||||
mask,
|
||||
&msg,
|
||||
nonce.deref() * ED25519_BASEPOINT_TABLE,
|
||||
nonce.deref() * key_image_generators[i],
|
||||
);
|
||||
// Effectively r - c x, except c x is (c_p x) + (c_c z), where z is the delta between the
|
||||
// ring member's commitment and our pseudo-out commitment (which will only have a known
|
||||
// discrete log over G if the amounts cancel out)
|
||||
incomplete_clsag.s[usize::from(inputs[i].1.decoys.signer_index())] =
|
||||
nonce.deref() - ((key_challenge * inputs[i].0.deref()) + challenged_mask);
|
||||
let clsag = incomplete_clsag;
|
||||
|
||||
// Zeroize private keys and nonces.
|
||||
inputs[i].0.zeroize();
|
||||
nonce.zeroize();
|
||||
|
||||
debug_assert!(clsag
|
||||
.verify(inputs[i].1.decoys.ring(), &key_images[i], &pseudo_out, &msg)
|
||||
.is_ok());
|
||||
|
||||
res.push((clsag, pseudo_out));
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Verify a CLSAG signature for the provided context.
|
||||
pub fn verify(
|
||||
&self,
|
||||
ring: &[[EdwardsPoint; 2]],
|
||||
I: &EdwardsPoint,
|
||||
pseudo_out: &EdwardsPoint,
|
||||
msg: &[u8; 32],
|
||||
) -> Result<(), ClsagError> {
|
||||
// Preliminary checks
|
||||
// s, c1, and points must also be encoded canonically, which is checked at time of decode
|
||||
if ring.is_empty() {
|
||||
Err(ClsagError::InvalidRing)?;
|
||||
}
|
||||
if ring.len() != self.s.len() {
|
||||
Err(ClsagError::InvalidS)?;
|
||||
}
|
||||
if I.is_identity() || (!I.is_torsion_free()) {
|
||||
Err(ClsagError::InvalidImage)?;
|
||||
}
|
||||
|
||||
let D = self.D.mul_by_cofactor();
|
||||
if D.is_identity() {
|
||||
Err(ClsagError::InvalidD)?;
|
||||
}
|
||||
|
||||
let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, &Mode::Verify(self.c1));
|
||||
if c1 != self.c1 {
|
||||
Err(ClsagError::InvalidC1)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write a CLSAG.
|
||||
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
|
||||
write_raw_vec(write_scalar, &self.s, w)?;
|
||||
w.write_all(&self.c1.to_bytes())?;
|
||||
write_point(&self.D, w)
|
||||
}
|
||||
|
||||
/// Read a CLSAG.
|
||||
pub fn read<R: Read>(decoys: usize, r: &mut R) -> io::Result<Clsag> {
|
||||
Ok(Clsag { s: read_raw_vec(read_scalar, decoys, r)?, c1: read_scalar(r)?, D: read_point(r)? })
|
||||
}
|
||||
}
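The mask rule documented on `Clsag::sign` can be sanity-checked on its own. Below is a minimal sketch (not part of the committed file), assuming the `curve25519-dalek` and `monero-generators` items these files already use (`ED25519_BASEPOINT_TABLE`, `H()`), with a local `commit` helper standing in for `Commitment::calculate` (`mask * G + amount * H`): once the last input's mask is `sum(output masks) - sum(other input masks)` and the amounts balance, the pseudo-outs and the output commitments sum to the same point.

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::H;
use rand_core::OsRng;

// Stand-in for `Commitment::calculate`: mask * G + amount * H
fn commit(mask: Scalar, amount: u64) -> EdwardsPoint {
  (ED25519_BASEPOINT_TABLE * &mask) + (H() * Scalar::from(amount))
}

fn main() {
  // Output commitments use wallet-derived masks
  let output_masks = [Scalar::random(&mut OsRng), Scalar::random(&mut OsRng)];
  let sum_outputs = output_masks[0] + output_masks[1];

  // All but the last input receive a random mask; the last receives the balancing mask
  let first_input_mask = Scalar::random(&mut OsRng);
  let last_input_mask = sum_outputs - first_input_mask;

  // With amounts which balance (1200 + 137 in, 1000 + 337 out), the pseudo-outs and the
  // output commitments sum to the same point
  let pseudo_outs = commit(first_input_mask, 1200) + commit(last_input_mask, 137);
  let outputs = commit(output_masks[0], 1000) + commit(output_masks[1], 337);
  assert_eq!(pseudo_outs, outputs);
}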
379
networks/monero/ringct/clsag/src/multisig.rs
Normal file
@@ -0,0 +1,379 @@
|
||||
use core::{ops::Deref, fmt::Debug};
|
||||
use std_shims::{
|
||||
sync::{Arc, Mutex},
|
||||
io::{self, Read, Write},
|
||||
collections::HashMap,
|
||||
};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng, SeedableRng};
|
||||
use rand_chacha::ChaCha20Rng;
|
||||
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
|
||||
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use group::{
|
||||
ff::{Field, PrimeField},
|
||||
Group, GroupEncoding,
|
||||
};
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
use dalek_ff_group as dfg;
|
||||
use frost::{
|
||||
dkg::lagrange,
|
||||
curve::Ed25519,
|
||||
Participant, FrostError, ThresholdKeys, ThresholdView,
|
||||
algorithm::{WriteAddendum, Algorithm},
|
||||
};
|
||||
|
||||
use monero_generators::hash_to_point;
|
||||
|
||||
use crate::{ClsagContext, Clsag};
|
||||
|
||||
impl ClsagContext {
|
||||
fn transcript<T: Transcript>(&self, transcript: &mut T) {
|
||||
// Doesn't domain separate as this is considered part of the larger CLSAG proof
|
||||
|
||||
// Ring index
|
||||
transcript.append_message(b"signer_index", [self.decoys.signer_index()]);
|
||||
|
||||
// Ring
|
||||
for (i, pair) in self.decoys.ring().iter().enumerate() {
|
||||
      // Doesn't include global output indexes as CLSAG doesn't care about/won't be affected by them
      // They're just an unreliable reference to this data which will be included in the message
      // if somehow relevant
|
||||
transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
|
||||
// This also transcripts the key image generator since it's derived from this key
|
||||
transcript.append_message(b"key", pair[0].compress().to_bytes());
|
||||
transcript.append_message(b"commitment", pair[1].compress().to_bytes())
|
||||
}
|
||||
|
||||
// Doesn't include the commitment's parts as the above ring + index includes the commitment
|
||||
// The only potential malleability would be if the G/H relationship is known, breaking the
|
||||
// discrete log problem, which breaks everything already
|
||||
}
|
||||
}
|
||||
|
||||
/// A channel to send the mask to use for the pseudo-out (rerandomized commitment) with.
|
||||
///
|
||||
/// A mask must be sent along this channel before any preprocess addendums are handled. Breaking
|
||||
/// this rule will cause a panic.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ClsagMultisigMaskSender {
|
||||
buf: Arc<Mutex<Option<Scalar>>>,
|
||||
}
|
||||
#[derive(Clone, Debug)]
|
||||
struct ClsagMultisigMaskReceiver {
|
||||
buf: Arc<Mutex<Option<Scalar>>>,
|
||||
}
|
||||
impl ClsagMultisigMaskSender {
|
||||
fn new() -> (ClsagMultisigMaskSender, ClsagMultisigMaskReceiver) {
|
||||
let buf = Arc::new(Mutex::new(None));
|
||||
(ClsagMultisigMaskSender { buf: buf.clone() }, ClsagMultisigMaskReceiver { buf })
|
||||
}
|
||||
|
||||
/// Send a mask to a CLSAG multisig instance.
|
||||
pub fn send(self, mask: Scalar) {
|
||||
*self.buf.lock() = Some(mask);
|
||||
}
|
||||
}
|
||||
impl ClsagMultisigMaskReceiver {
|
||||
fn recv(self) -> Scalar {
|
||||
self.buf.lock().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// Addendum produced during the signing process.
|
||||
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
|
||||
pub struct ClsagAddendum {
|
||||
key_image_share: dfg::EdwardsPoint,
|
||||
}
|
||||
|
||||
impl ClsagAddendum {
|
||||
/// The key image share within this addendum.
|
||||
pub fn key_image_share(&self) -> dfg::EdwardsPoint {
|
||||
self.key_image_share
|
||||
}
|
||||
}
|
||||
|
||||
impl WriteAddendum for ClsagAddendum {
|
||||
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(self.key_image_share.compress().to_bytes().as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
struct Interim {
|
||||
p: Scalar,
|
||||
c: Scalar,
|
||||
|
||||
clsag: Clsag,
|
||||
pseudo_out: EdwardsPoint,
|
||||
}
|
||||
|
||||
/// FROST-inspired algorithm for producing a CLSAG signature.
|
||||
///
|
||||
/// Before this has its `process_addendum` called, a mask must be set. Else this will panic.
|
||||
///
|
||||
/// The message signed is expected to be a 32-byte value. Per Monero, it's the keccak256 hash of
|
||||
/// the transaction data which is signed. This will panic if the message is not a 32-byte value.
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ClsagMultisig {
|
||||
transcript: RecommendedTranscript,
|
||||
|
||||
key_image_generator: EdwardsPoint,
|
||||
key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
|
||||
image: Option<dfg::EdwardsPoint>,
|
||||
|
||||
context: ClsagContext,
|
||||
|
||||
mask_recv: Option<ClsagMultisigMaskReceiver>,
|
||||
mask: Option<Scalar>,
|
||||
|
||||
msg: Option<[u8; 32]>,
|
||||
interim: Option<Interim>,
|
||||
}
|
||||
|
||||
impl ClsagMultisig {
|
||||
/// Construct a new instance of multisignature CLSAG signing.
|
||||
pub fn new(
|
||||
transcript: RecommendedTranscript,
|
||||
context: ClsagContext,
|
||||
) -> (ClsagMultisig, ClsagMultisigMaskSender) {
|
||||
let (mask_send, mask_recv) = ClsagMultisigMaskSender::new();
|
||||
(
|
||||
ClsagMultisig {
|
||||
transcript,
|
||||
|
||||
key_image_generator: hash_to_point(context.decoys.signer_ring_members()[0].compress().0),
|
||||
key_image_shares: HashMap::new(),
|
||||
image: None,
|
||||
|
||||
context,
|
||||
|
||||
mask_recv: Some(mask_recv),
|
||||
mask: None,
|
||||
|
||||
msg: None,
|
||||
interim: None,
|
||||
},
|
||||
mask_send,
|
||||
)
|
||||
}
|
||||
|
||||
/// The key image generator used by the signer.
|
||||
pub fn key_image_generator(&self) -> EdwardsPoint {
|
||||
self.key_image_generator
|
||||
}
|
||||
}
|
||||
|
||||
impl Algorithm<Ed25519> for ClsagMultisig {
|
||||
type Transcript = RecommendedTranscript;
|
||||
type Addendum = ClsagAddendum;
|
||||
// We output the CLSAG and the key image, which requires an interactive protocol to obtain
|
||||
type Signature = (Clsag, EdwardsPoint);
|
||||
|
||||
// We need the nonce represented against both G and the key image generator
|
||||
fn nonces(&self) -> Vec<Vec<dfg::EdwardsPoint>> {
|
||||
vec![vec![dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.key_image_generator)]]
|
||||
}
|
||||
|
||||
// We also publish our share of the key image
|
||||
fn preprocess_addendum<R: RngCore + CryptoRng>(
|
||||
&mut self,
|
||||
_rng: &mut R,
|
||||
keys: &ThresholdKeys<Ed25519>,
|
||||
) -> ClsagAddendum {
|
||||
ClsagAddendum {
|
||||
key_image_share: dfg::EdwardsPoint(self.key_image_generator) * keys.secret_share().deref(),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
|
||||
let mut bytes = [0; 32];
|
||||
reader.read_exact(&mut bytes)?;
|
||||
// dfg ensures the point is torsion free
|
||||
let xH = Option::<dfg::EdwardsPoint>::from(dfg::EdwardsPoint::from_bytes(&bytes))
|
||||
.ok_or_else(|| io::Error::other("invalid key image"))?;
|
||||
// Ensure this is a canonical point
|
||||
if xH.to_bytes() != bytes {
|
||||
Err(io::Error::other("non-canonical key image"))?;
|
||||
}
|
||||
|
||||
Ok(ClsagAddendum { key_image_share: xH })
|
||||
}
|
||||
|
||||
fn process_addendum(
|
||||
&mut self,
|
||||
view: &ThresholdView<Ed25519>,
|
||||
l: Participant,
|
||||
addendum: ClsagAddendum,
|
||||
) -> Result<(), FrostError> {
|
||||
if self.image.is_none() {
|
||||
self.transcript.domain_separate(b"CLSAG");
|
||||
// Transcript the ring
|
||||
self.context.transcript(&mut self.transcript);
|
||||
// Fetch the mask from the Mutex
|
||||
// We set it to a variable to ensure our view of it is consistent
|
||||
      // It was this or an mpsc channel... std doesn't have oneshot :/
|
||||
self.mask = Some(self.mask_recv.take().unwrap().recv());
|
||||
// Transcript the mask
|
||||
self.transcript.append_message(b"mask", self.mask.expect("mask wasn't set").to_bytes());
|
||||
|
||||
// Init the image to the offset
|
||||
self.image = Some(dfg::EdwardsPoint(self.key_image_generator) * view.offset());
|
||||
}
|
||||
|
||||
// Transcript this participant's contribution
|
||||
self.transcript.append_message(b"participant", l.to_bytes());
|
||||
self
|
||||
.transcript
|
||||
.append_message(b"key_image_share", addendum.key_image_share.compress().to_bytes());
|
||||
|
||||
// Accumulate the interpolated share
|
||||
let interpolated_key_image_share =
|
||||
addendum.key_image_share * lagrange::<dfg::Scalar>(l, view.included());
|
||||
*self.image.as_mut().unwrap() += interpolated_key_image_share;
|
||||
|
||||
self
|
||||
.key_image_shares
|
||||
.insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);
|
||||
|
||||
Ok(())
|
||||
}
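A minimal sketch (not part of the committed file) of why the accumulation in `process_addendum` recovers the full key image. It assumes a toy degree-1 Shamir sharing and hard-codes the Lagrange coefficients `lagrange` would produce for the participant set {1, 2}; `hash_to_point` is the generator function already imported by this file.

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::hash_to_point;
use rand_core::OsRng;

fn main() {
  // A toy 2-of-n Shamir sharing of x: share(i) = x + a * i
  let (x, a) = (Scalar::random(&mut OsRng), Scalar::random(&mut OsRng));
  let share = |i: u64| x + (a * Scalar::from(i));

  // The key image generator is the hash-to-point of the group key
  let generator = hash_to_point((ED25519_BASEPOINT_TABLE * &x).compress().0);

  // Lagrange coefficients for the set {1, 2}, evaluated at 0: l_1 = 2, l_2 = -1
  let (l1, l2) = (Scalar::from(2u8), -Scalar::from(1u8));

  // Each participant publishes share(i) * generator; weighting each published share by its
  // Lagrange coefficient and summing yields x * generator, the actual key image
  let aggregated: EdwardsPoint = (generator * (l1 * share(1))) + (generator * (l2 * share(2)));
  assert_eq!(aggregated, generator * x);
}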
|
||||
|
||||
fn transcript(&mut self) -> &mut Self::Transcript {
|
||||
&mut self.transcript
|
||||
}
|
||||
|
||||
fn sign_share(
|
||||
&mut self,
|
||||
view: &ThresholdView<Ed25519>,
|
||||
nonce_sums: &[Vec<dfg::EdwardsPoint>],
|
||||
nonces: Vec<Zeroizing<dfg::Scalar>>,
|
||||
msg: &[u8],
|
||||
) -> dfg::Scalar {
|
||||
// Use the transcript to get a seeded random number generator
|
||||
//
|
||||
// The transcript contains private data, preventing passive adversaries from recreating this
|
||||
// process even if they have access to the commitments/key image share broadcast so far
|
||||
//
|
||||
// Specifically, the transcript contains the signer's index within the ring, along with the
|
||||
// opening of the commitment being re-randomized (and what it's re-randomized to)
|
||||
let mut rng = ChaCha20Rng::from_seed(self.transcript.rng_seed(b"decoy_responses"));
|
||||
|
||||
self.msg = Some(msg.try_into().expect("CLSAG message should be 32-bytes"));
|
||||
|
||||
let sign_core = Clsag::sign_core(
|
||||
&mut rng,
|
||||
&self.image.expect("verifying a share despite never processing any addendums").0,
|
||||
&self.context,
|
||||
self.mask.expect("mask wasn't set"),
|
||||
self.msg.as_ref().unwrap(),
|
||||
nonce_sums[0][0].0,
|
||||
nonce_sums[0][1].0,
|
||||
);
|
||||
self.interim = Some(Interim {
|
||||
p: sign_core.key_challenge,
|
||||
c: sign_core.challenged_mask,
|
||||
clsag: sign_core.incomplete_clsag,
|
||||
pseudo_out: sign_core.pseudo_out,
|
||||
});
|
||||
|
||||
// r - p x, where p is the challenge for the keys
|
||||
*nonces[0] - dfg::Scalar(sign_core.key_challenge) * view.secret_share().deref()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn verify(
|
||||
&self,
|
||||
_: dfg::EdwardsPoint,
|
||||
_: &[Vec<dfg::EdwardsPoint>],
|
||||
sum: dfg::Scalar,
|
||||
) -> Option<Self::Signature> {
|
||||
let interim = self.interim.as_ref().unwrap();
|
||||
let mut clsag = interim.clsag.clone();
|
||||
// We produced shares as `r - p x`, yet the signature is actually `r - p x - c x`
|
||||
    // Subtract `c x` (saved as `c`) now
|
||||
clsag.s[usize::from(self.context.decoys.signer_index())] = sum.0 - interim.c;
|
||||
if clsag
|
||||
.verify(
|
||||
self.context.decoys.ring(),
|
||||
&self.image.expect("verifying a signature despite never processing any addendums").0,
|
||||
&interim.pseudo_out,
|
||||
self.msg.as_ref().unwrap(),
|
||||
)
|
||||
.is_ok()
|
||||
{
|
||||
return Some((clsag, interim.pseudo_out));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn verify_share(
|
||||
&self,
|
||||
verification_share: dfg::EdwardsPoint,
|
||||
nonces: &[Vec<dfg::EdwardsPoint>],
|
||||
share: dfg::Scalar,
|
||||
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
|
||||
let interim = self.interim.as_ref().unwrap();
|
||||
|
||||
// For a share `r - p x`, the following two equalities should hold:
|
||||
// - `(r - p x)G == R.0 - pV`, where `V = xG`
|
||||
// - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
|
||||
//
|
||||
// This is effectively a discrete log equality proof for:
|
||||
// V, K over G, H
|
||||
// with nonces
|
||||
// R.0, R.1
|
||||
// and solution
|
||||
// s
|
||||
//
|
||||
// Which is a batch-verifiable rewrite of the traditional CP93 proof
|
||||
// (and also writable as Generalized Schnorr Protocol)
|
||||
//
|
||||
    // That means that, given a proper challenge, this alone can be argued to prove the key image
    // share is well-formed, with the provided signature share serving as that proof.
|
||||
|
||||
// This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of
|
||||
// the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
|
||||
// is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
|
||||
// extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).
|
||||
|
||||
let key_image_share = self.key_image_shares[&verification_share.to_bytes()];
|
||||
|
||||
// Hash every variable relevant here, using the hash output as the random weight
|
||||
let mut weight_transcript =
|
||||
RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
|
||||
weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
|
||||
weight_transcript.append_message(b"H", self.key_image_generator.to_bytes());
|
||||
weight_transcript.append_message(b"xG", verification_share.to_bytes());
|
||||
weight_transcript.append_message(b"xH", key_image_share.to_bytes());
|
||||
weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
|
||||
weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
|
||||
weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
|
||||
weight_transcript.append_message(b"s", share.to_repr());
|
||||
let weight = weight_transcript.challenge(b"weight");
|
||||
let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));
|
||||
|
||||
let part_one = vec![
|
||||
(share, dfg::EdwardsPoint::generator()),
|
||||
// -(R.0 - pV) == -R.0 + pV
|
||||
(-dfg::Scalar::ONE, nonces[0][0]),
|
||||
(dfg::Scalar(interim.p), verification_share),
|
||||
];
|
||||
|
||||
let mut part_two = vec![
|
||||
(weight * share, dfg::EdwardsPoint(self.key_image_generator)),
|
||||
// -(R.1 - pK) == -R.1 + pK
|
||||
(-weight, nonces[0][1]),
|
||||
(weight * dfg::Scalar(interim.p), key_image_share),
|
||||
];
|
||||
|
||||
let mut all = part_one;
|
||||
all.append(&mut part_two);
|
||||
Ok(all)
|
||||
}
|
||||
}
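A minimal sketch (not part of the committed file) of the two relations `verify_share` describes, written directly over `curve25519-dalek` points rather than the `dalek-ff-group` wrappers. The names `V`, `K`, `R0`, `R1`, `p`, and `s` follow the comments above; the random `weight` stands in for the transcript-derived weight.

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE, traits::Identity, scalar::Scalar, edwards::EdwardsPoint,
};
use monero_generators::hash_to_point;
use rand_core::OsRng;

#[allow(non_snake_case)]
fn main() {
  // Secret x, verification share V = xG, key image share K = xH
  let x = Scalar::random(&mut OsRng);
  let V = ED25519_BASEPOINT_TABLE * &x;
  let H = hash_to_point(V.compress().0);
  let K = H * x;

  // Nonce commitments R.0 = rG, R.1 = rH, and the signature share s = r - p x
  let r = Scalar::random(&mut OsRng);
  let (R0, R1) = (ED25519_BASEPOINT_TABLE * &r, H * r);
  let p = Scalar::random(&mut OsRng);
  let s = r - (p * x);

  // Both relations from the comments hold individually...
  assert_eq!(ED25519_BASEPOINT_TABLE * &s, R0 - (V * p));
  assert_eq!(H * s, R1 - (K * p));

  // ... so a random linear combination of them (what the returned multiexp pairs encode) is the
  // identity
  let weight = Scalar::random(&mut OsRng);
  let combined =
    ((ED25519_BASEPOINT_TABLE * &s) - R0 + (V * p)) + (((H * s) - R1 + (K * p)) * weight);
  assert_eq!(combined, EdwardsPoint::identity());
}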
119
networks/monero/ringct/clsag/src/tests.rs
Normal file
@@ -0,0 +1,119 @@
|
||||
use core::ops::Deref;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
|
||||
|
||||
#[cfg(feature = "multisig")]
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
#[cfg(feature = "multisig")]
|
||||
use frost::curve::Ed25519;
|
||||
|
||||
use monero_generators::hash_to_point;
|
||||
use monero_primitives::{Commitment, Decoys};
|
||||
use crate::{ClsagContext, Clsag};
|
||||
#[cfg(feature = "multisig")]
|
||||
use crate::ClsagMultisig;
|
||||
|
||||
#[cfg(feature = "multisig")]
|
||||
use frost::{
|
||||
Participant,
|
||||
tests::{key_gen, algorithm_machines, sign},
|
||||
};
|
||||
|
||||
const RING_LEN: u64 = 11;
|
||||
const AMOUNT: u64 = 1337;
|
||||
|
||||
#[cfg(feature = "multisig")]
|
||||
const RING_INDEX: u8 = 3;
|
||||
|
||||
#[test]
|
||||
fn clsag() {
|
||||
for real in 0 .. RING_LEN {
|
||||
let msg = [1; 32];
|
||||
|
||||
let mut secrets = (Zeroizing::new(Scalar::ZERO), Scalar::ZERO);
|
||||
let mut ring = vec![];
|
||||
for i in 0 .. RING_LEN {
|
||||
let dest = Zeroizing::new(Scalar::random(&mut OsRng));
|
||||
let mask = Scalar::random(&mut OsRng);
|
||||
let amount;
|
||||
if i == real {
|
||||
secrets = (dest.clone(), mask);
|
||||
amount = AMOUNT;
|
||||
} else {
|
||||
amount = OsRng.next_u64();
|
||||
}
|
||||
ring
|
||||
.push([dest.deref() * ED25519_BASEPOINT_TABLE, Commitment::new(mask, amount).calculate()]);
|
||||
}
|
||||
|
||||
let (mut clsag, pseudo_out) = Clsag::sign(
|
||||
&mut OsRng,
|
||||
vec![(
|
||||
secrets.0.clone(),
|
||||
ClsagContext::new(
|
||||
Decoys::new((1 ..= RING_LEN).collect(), u8::try_from(real).unwrap(), ring.clone())
|
||||
.unwrap(),
|
||||
Commitment::new(secrets.1, AMOUNT),
|
||||
)
|
||||
.unwrap(),
|
||||
)],
|
||||
Scalar::random(&mut OsRng),
|
||||
msg,
|
||||
)
|
||||
.unwrap()
|
||||
.swap_remove(0);
|
||||
|
||||
let image =
|
||||
hash_to_point((ED25519_BASEPOINT_TABLE * secrets.0.deref()).compress().0) * secrets.0.deref();
|
||||
clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
|
||||
|
||||
// make sure verification fails if we throw a random `c1` at it.
|
||||
clsag.c1 = Scalar::random(&mut OsRng);
|
||||
assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "multisig")]
|
||||
#[test]
|
||||
fn clsag_multisig() {
|
||||
let keys = key_gen::<_, Ed25519>(&mut OsRng);
|
||||
|
||||
let randomness = Scalar::random(&mut OsRng);
|
||||
let mut ring = vec![];
|
||||
for i in 0 .. RING_LEN {
|
||||
let dest;
|
||||
let mask;
|
||||
let amount;
|
||||
if i != u64::from(RING_INDEX) {
|
||||
dest = &Scalar::random(&mut OsRng) * ED25519_BASEPOINT_TABLE;
|
||||
mask = Scalar::random(&mut OsRng);
|
||||
amount = OsRng.next_u64();
|
||||
} else {
|
||||
dest = keys[&Participant::new(1).unwrap()].group_key().0;
|
||||
mask = randomness;
|
||||
amount = AMOUNT;
|
||||
}
|
||||
ring.push([dest, Commitment::new(mask, amount).calculate()]);
|
||||
}
|
||||
|
||||
let (algorithm, mask_send) = ClsagMultisig::new(
|
||||
RecommendedTranscript::new(b"Monero Serai CLSAG Test"),
|
||||
ClsagContext::new(
|
||||
Decoys::new((1 ..= RING_LEN).collect(), RING_INDEX, ring.clone()).unwrap(),
|
||||
Commitment::new(randomness, AMOUNT),
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
mask_send.send(Scalar::random(&mut OsRng));
|
||||
|
||||
sign(
|
||||
&mut OsRng,
|
||||
&algorithm,
|
||||
keys.clone(),
|
||||
algorithm_machines(&mut OsRng, &algorithm, &keys),
|
||||
&[1; 32],
|
||||
);
|
||||
}
45
networks/monero/ringct/mlsag/Cargo.toml
Normal file
@@ -0,0 +1,45 @@
|
||||
[package]
|
||||
name = "monero-mlsag"
|
||||
version = "0.1.0"
|
||||
description = "The MLSAG linkable ring signature, as defined by the Monero protocol"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/ringct/mlsag"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.79"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false }
|
||||
|
||||
thiserror = { version = "1", default-features = false, optional = true }
|
||||
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
|
||||
|
||||
# Cryptographic dependencies
|
||||
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }
|
||||
|
||||
# Other Monero dependencies
|
||||
monero-io = { path = "../../io", version = "0.1", default-features = false }
|
||||
monero-generators = { path = "../../generators", version = "0.4", default-features = false }
|
||||
monero-primitives = { path = "../../primitives", version = "0.1", default-features = false }
|
||||
|
||||
[features]
|
||||
std = [
|
||||
"std-shims/std",
|
||||
|
||||
"thiserror",
|
||||
|
||||
"zeroize/std",
|
||||
|
||||
"monero-io/std",
|
||||
"monero-generators/std",
|
||||
"monero-primitives/std",
|
||||
]
|
||||
default = ["std"]
21
networks/monero/ringct/mlsag/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022-2024 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
11
networks/monero/ringct/mlsag/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Monero MLSAG
|
||||
|
||||
The MLSAG linkable ring signature, as defined by the Monero protocol.
|
||||
|
||||
This library is usable under no-std when the `std` feature (on by default) is
|
||||
disabled.
|
||||
|
||||
### Cargo Features
|
||||
|
||||
- `std` (on by default): Enables `std` (and with it, more efficient internal
|
||||
implementations).
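A hypothetical dependency declaration for the no-std case described above, assuming the crate is consumed by version as its siblings reference each other (swap in a `path` dependency when building from this repository):

[dependencies]
monero-mlsag = { version = "0.1", default-features = false }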
238
networks/monero/ringct/mlsag/src/lib.rs
Normal file
@@ -0,0 +1,238 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
use std_shims::{
|
||||
vec,
|
||||
vec::Vec,
|
||||
io::{self, Read, Write},
|
||||
};
|
||||
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use curve25519_dalek::{traits::IsIdentity, Scalar, EdwardsPoint};
|
||||
|
||||
use monero_io::*;
|
||||
use monero_generators::{H, hash_to_point};
|
||||
use monero_primitives::keccak256_to_scalar;
|
||||
|
||||
/// Errors when working with MLSAGs.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
#[cfg_attr(feature = "std", derive(thiserror::Error))]
|
||||
pub enum MlsagError {
|
||||
/// Invalid ring (such as too small or too large).
|
||||
#[cfg_attr(feature = "std", error("invalid ring"))]
|
||||
InvalidRing,
|
||||
/// Invalid amount of key images.
|
||||
#[cfg_attr(feature = "std", error("invalid amount of key images"))]
|
||||
InvalidAmountOfKeyImages,
|
||||
/// Invalid ss matrix.
|
||||
#[cfg_attr(feature = "std", error("invalid ss"))]
|
||||
InvalidSs,
|
||||
/// Invalid key image.
|
||||
#[cfg_attr(feature = "std", error("invalid key image"))]
|
||||
InvalidKeyImage,
|
||||
/// Invalid ci vector.
|
||||
#[cfg_attr(feature = "std", error("invalid ci"))]
|
||||
InvalidCi,
|
||||
}
|
||||
|
||||
/// A vector of rings, forming a matrix, to verify the MLSAG with.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct RingMatrix {
|
||||
matrix: Vec<Vec<EdwardsPoint>>,
|
||||
}
|
||||
|
||||
impl RingMatrix {
|
||||
/// Construct a ring matrix from an already formatted series of points.
|
||||
fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
|
||||
// Monero requires that there is more than one ring member for MLSAG signatures:
|
||||
// https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
|
||||
// src/ringct/rctSigs.cpp#L462
|
||||
if matrix.len() < 2 {
|
||||
Err(MlsagError::InvalidRing)?;
|
||||
}
|
||||
for member in &matrix {
|
||||
if member.is_empty() || (member.len() != matrix[0].len()) {
|
||||
Err(MlsagError::InvalidRing)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(RingMatrix { matrix })
|
||||
}
|
||||
|
||||
/// Construct a ring matrix for an individual output.
|
||||
pub fn individual(
|
||||
ring: &[[EdwardsPoint; 2]],
|
||||
pseudo_out: EdwardsPoint,
|
||||
) -> Result<Self, MlsagError> {
|
||||
let mut matrix = Vec::with_capacity(ring.len());
|
||||
for ring_member in ring {
|
||||
matrix.push(vec![ring_member[0], ring_member[1] - pseudo_out]);
|
||||
}
|
||||
RingMatrix::new(matrix)
|
||||
}
|
||||
|
||||
/// Iterate over the members of the matrix.
|
||||
fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
|
||||
self.matrix.iter().map(AsRef::as_ref)
|
||||
}
|
||||
|
||||
  /// Get the number of members in the ring.
|
||||
pub fn members(&self) -> usize {
|
||||
self.matrix.len()
|
||||
}
|
||||
|
||||
/// Get the length of a ring member.
|
||||
///
|
||||
  /// A ring member is a vector of points for which the signer knows all of the discrete
  /// logarithms.
|
||||
pub fn member_len(&self) -> usize {
|
||||
// this is safe to do as the constructors don't allow empty rings
|
||||
self.matrix[0].len()
|
||||
}
|
||||
}
|
||||
|
||||
/// The MLSAG linkable ring signature, as used in Monero.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct Mlsag {
|
||||
ss: Vec<Vec<Scalar>>,
|
||||
cc: Scalar,
|
||||
}
|
||||
|
||||
impl Mlsag {
|
||||
/// Write a MLSAG.
|
||||
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
|
||||
for ss in &self.ss {
|
||||
write_raw_vec(write_scalar, ss, w)?;
|
||||
}
|
||||
write_scalar(&self.cc, w)
|
||||
}
|
||||
|
||||
/// Read a MLSAG.
|
||||
pub fn read<R: Read>(mixins: usize, ss_2_elements: usize, r: &mut R) -> io::Result<Mlsag> {
|
||||
Ok(Mlsag {
|
||||
ss: (0 .. mixins)
|
||||
.map(|_| read_raw_vec(read_scalar, ss_2_elements, r))
|
||||
.collect::<Result<_, _>>()?,
|
||||
cc: read_scalar(r)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Verify a MLSAG.
|
||||
pub fn verify(
|
||||
&self,
|
||||
msg: &[u8; 32],
|
||||
ring: &RingMatrix,
|
||||
key_images: &[EdwardsPoint],
|
||||
) -> Result<(), MlsagError> {
|
||||
    // MLSAG allows layers to not be linkable, in which case they don't need key images
    // Monero requires that there is always exactly one non-linkable layer - the amount commitments.
|
||||
if ring.member_len() != (key_images.len() + 1) {
|
||||
Err(MlsagError::InvalidAmountOfKeyImages)?;
|
||||
}
|
||||
|
||||
let mut buf = Vec::with_capacity(6 * 32);
|
||||
buf.extend_from_slice(msg);
|
||||
|
||||
let mut ci = self.cc;
|
||||
|
||||
// This is an iterator over the key images as options with an added entry of `None` at the
|
||||
// end for the non-linkable layer
|
||||
let key_images_iter = key_images.iter().map(|ki| Some(*ki)).chain(core::iter::once(None));
|
||||
|
||||
if ring.matrix.len() != self.ss.len() {
|
||||
Err(MlsagError::InvalidSs)?;
|
||||
}
|
||||
|
||||
for (ring_member, ss) in ring.iter().zip(&self.ss) {
|
||||
if ring_member.len() != ss.len() {
|
||||
Err(MlsagError::InvalidSs)?;
|
||||
}
|
||||
|
||||
for ((ring_member_entry, s), ki) in ring_member.iter().zip(ss).zip(key_images_iter.clone()) {
|
||||
#[allow(non_snake_case)]
|
||||
let L = EdwardsPoint::vartime_double_scalar_mul_basepoint(&ci, ring_member_entry, s);
|
||||
|
||||
let compressed_ring_member_entry = ring_member_entry.compress();
|
||||
buf.extend_from_slice(compressed_ring_member_entry.as_bytes());
|
||||
buf.extend_from_slice(L.compress().as_bytes());
|
||||
|
||||
// Not all dimensions need to be linkable, e.g. commitments, and only linkable layers need
|
||||
// to have key images.
|
||||
if let Some(ki) = ki {
|
||||
if ki.is_identity() || (!ki.is_torsion_free()) {
|
||||
Err(MlsagError::InvalidKeyImage)?;
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
let R = (s * hash_to_point(compressed_ring_member_entry.to_bytes())) + (ci * ki);
|
||||
buf.extend_from_slice(R.compress().as_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
ci = keccak256_to_scalar(&buf);
|
||||
// keep the msg in the buffer.
|
||||
buf.drain(msg.len() ..);
|
||||
}
|
||||
|
||||
if ci != self.cc {
|
||||
Err(MlsagError::InvalidCi)?
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
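A minimal sketch (not part of the committed file) of the per-layer terms `verify` recomputes, confirming that `vartime_double_scalar_mul_basepoint(&ci, P, s)` equals `(ci * P) + (s * G)` and showing the additional key-image term used for linkable layers. `hash_to_point` is the generator function already imported by this file.

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::hash_to_point;
use rand_core::OsRng;

#[allow(non_snake_case)]
fn main() {
  let (ci, s, x) =
    (Scalar::random(&mut OsRng), Scalar::random(&mut OsRng), Scalar::random(&mut OsRng));
  let P = ED25519_BASEPOINT_TABLE * &x;
  let key_image = hash_to_point(P.compress().0) * x;

  // L, as computed in `verify`, is a double-scalar multiplication over the ring member and the
  // basepoint
  let L = EdwardsPoint::vartime_double_scalar_mul_basepoint(&ci, &P, &s);
  assert_eq!(L, (P * ci) + (ED25519_BASEPOINT_TABLE * &s));

  // R, only hashed in for linkable layers, additionally binds the key image
  let R = (s * hash_to_point(P.compress().0)) + (ci * key_image);
  let _ = R;
}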
|
||||
|
||||
/// Builder for a RingMatrix when using an aggregate signature.
|
||||
///
|
||||
/// This handles the formatting as necessary.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct AggregateRingMatrixBuilder {
|
||||
key_ring: Vec<Vec<EdwardsPoint>>,
|
||||
amounts_ring: Vec<EdwardsPoint>,
|
||||
sum_out: EdwardsPoint,
|
||||
}
|
||||
|
||||
impl AggregateRingMatrixBuilder {
|
||||
/// Create a new AggregateRingMatrixBuilder.
|
||||
///
|
||||
  /// This takes in the transaction's output commitments and the fee used.
|
||||
pub fn new(commitments: &[EdwardsPoint], fee: u64) -> Self {
|
||||
AggregateRingMatrixBuilder {
|
||||
key_ring: vec![],
|
||||
amounts_ring: vec![],
|
||||
sum_out: commitments.iter().sum::<EdwardsPoint>() + (H() * Scalar::from(fee)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a ring of [output key, commitment] to the matrix.
|
||||
pub fn push_ring(&mut self, ring: &[[EdwardsPoint; 2]]) -> Result<(), MlsagError> {
|
||||
if self.key_ring.is_empty() {
|
||||
self.key_ring = vec![vec![]; ring.len()];
|
||||
// Now that we know the length of the ring, fill the `amounts_ring`.
|
||||
self.amounts_ring = vec![-self.sum_out; ring.len()];
|
||||
}
|
||||
|
||||
if (self.amounts_ring.len() != ring.len()) || ring.is_empty() {
|
||||
// All the rings in an aggregate matrix must be the same length.
|
||||
return Err(MlsagError::InvalidRing);
|
||||
}
|
||||
|
||||
for (i, ring_member) in ring.iter().enumerate() {
|
||||
self.key_ring[i].push(ring_member[0]);
|
||||
self.amounts_ring[i] += ring_member[1]
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build and return the [`RingMatrix`].
|
||||
pub fn build(mut self) -> Result<RingMatrix, MlsagError> {
|
||||
for (i, amount_commitment) in self.amounts_ring.drain(..).enumerate() {
|
||||
self.key_ring[i].push(amount_commitment);
|
||||
}
|
||||
RingMatrix::new(self.key_ring)
|
||||
}
|
||||
}
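A minimal sketch (not part of the committed file) of what `AggregateRingMatrixBuilder` produces for the true spend's amounts column: with `sum_out` equal to the output commitments plus `fee * H`, the entry is a pure `G`-multiple whenever the amounts balance, which is the discrete log the aggregate MLSAG proves knowledge of. `commit` stands in for a Pedersen commitment (`mask * G + amount * H`) and `H()` is the generator function used by this file.

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::H;
use rand_core::OsRng;

// Pedersen commitment: mask * G + amount * H
fn commit(mask: Scalar, amount: u64) -> EdwardsPoint {
  (ED25519_BASEPOINT_TABLE * &mask) + (H() * Scalar::from(amount))
}

fn main() {
  // One input of 600, spent to one output of 590 plus a fee of 10
  let input_mask = Scalar::random(&mut OsRng);
  let output_mask = Scalar::random(&mut OsRng);
  let input = commit(input_mask, 600);
  let output = commit(output_mask, 590);

  // The builder's `sum_out` is the sum of the output commitments plus `fee * H`
  let sum_out = output + (H() * Scalar::from(10u64));

  // The amounts entry for the true spend's row is `input_commitment - sum_out`; with balanced
  // amounts, the `H` terms cancel and only a known multiple of `G` remains
  let amounts_entry = input - sum_out;
  assert_eq!(amounts_entry, ED25519_BASEPOINT_TABLE * &(input_mask - output_mask));
}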