Tidy and document monero-bulletproofs

I still don't like the impl of the original Bulletproofs...
Luke Parker
2024-06-16 10:39:28 -04:00
parent d9107b53a6
commit 77a2496ade
9 changed files with 242 additions and 259 deletions

View File

@@ -49,12 +49,12 @@ pub fn H_pow_2() -> &'static [EdwardsPoint; 64] {
})
}
// The maximum amount of commitments proven for within a single range proof.
const MAX_M: usize = 16;
// The amount of bits the value within a commitment may use.
const N: usize = 64;
// The maximum amount of bits used within a single range proof.
const MAX_MN: usize = MAX_M * N;
/// The maximum amount of commitments provable within a single range proof.
pub const MAX_COMMITMENTS: usize = 16;
/// The amount of bits a value within a commitment may use.
pub const COMMITMENT_BITS: usize = 64;
/// The base-2 logarithm of the amount of bits a value within a commitment may use.
pub const LOG_COMMITMENT_BITS: usize = 6; // 2 ** 6 == N
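A minimal sanity sketch of how these constants relate (illustrative only, not part of the diff):
// 2^LOG_COMMITMENT_BITS bits per value; a proof covers at most MAX_COMMITMENTS * COMMITMENT_BITS bits.
assert_eq!(1usize << LOG_COMMITMENT_BITS, COMMITMENT_BITS);
assert_eq!(MAX_COMMITMENTS * COMMITMENT_BITS, 1024);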
/// Container struct for Bulletproofs(+) generators.
#[allow(non_snake_case)]
@@ -68,6 +68,9 @@ pub struct Generators {
/// Consumers should not call this function ad-hoc; they should call it within a build script or
/// use a once-initialized static.
pub fn bulletproofs_generators(dst: &'static [u8]) -> Generators {
// The maximum amount of bits used within a single range proof.
const MAX_MN: usize = MAX_COMMITMENTS * COMMITMENT_BITS;
let mut preimage = H().compress().to_bytes().to_vec();
preimage.extend(dst);
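To follow the once-initialized-static guidance above, a minimal sketch (the cell name, accessor, and domain-separation tag are illustrative assumptions, not from this crate):
use std::sync::OnceLock;
use monero_generators::{Generators, bulletproofs_generators};
static GENERATORS_CELL: OnceLock<Generators> = OnceLock::new();
// Compute the generators once, on first use, instead of ad-hoc per call.
fn generators() -> &'static Generators {
GENERATORS_CELL.get_or_init(|| bulletproofs_generators(b"example_dst"))
}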

View File

@@ -1,4 +1,4 @@
use std_shims::{vec, vec::Vec};
use std_shims::vec::Vec;
use curve25519_dalek::{
constants::ED25519_BASEPOINT_POINT,
@@ -21,12 +21,8 @@ pub(crate) struct InternalBatchVerifier {
}
impl InternalBatchVerifier {
pub fn new() -> Self {
Self { g: Scalar::ZERO, h: Scalar::ZERO, g_bold: vec![], h_bold: vec![], other: vec![] }
}
#[must_use]
pub fn verify(self, G: EdwardsPoint, H: EdwardsPoint, generators: &Generators) -> bool {
fn verify(self, G: EdwardsPoint, H: EdwardsPoint, generators: &Generators) -> bool {
let capacity = 2 + self.g_bold.len() + self.h_bold.len() + self.other.len();
let mut scalars = Vec::with_capacity(capacity);
let mut points = Vec::with_capacity(capacity);
@@ -60,7 +56,7 @@ impl InternalBatchVerifier {
pub(crate) struct BulletproofsBatchVerifier(pub(crate) InternalBatchVerifier);
impl BulletproofsBatchVerifier {
#[must_use]
pub fn verify(self) -> bool {
pub(crate) fn verify(self) -> bool {
self.0.verify(ED25519_BASEPOINT_POINT, H(), original::GENERATORS())
}
}
@@ -69,26 +65,35 @@ impl BulletproofsBatchVerifier {
pub(crate) struct BulletproofsPlusBatchVerifier(pub(crate) InternalBatchVerifier);
impl BulletproofsPlusBatchVerifier {
#[must_use]
pub fn verify(self) -> bool {
pub(crate) fn verify(self) -> bool {
// Bulletproofs+ is written as per the paper, with G for the value and H for the mask
// Monero uses H for the value and G for the mask
self.0.verify(H(), ED25519_BASEPOINT_POINT, plus::GENERATORS())
}
}
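For context on the swapped generators noted above, Monero's Pedersen commitments put the mask on G and the value on H; a minimal sketch of that convention (the helper below is illustrative, not this crate's API):
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::H;
// Monero convention: commitment = mask * G + amount * H.
fn commit(mask: Scalar, amount: u64) -> EdwardsPoint {
(ED25519_BASEPOINT_TABLE * &mask) + (H() * Scalar::from(amount))
}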
/// A batch verifier for Bulletproofs(+).
///
/// This uses a fixed layout such that each fixed point only incurs a single point scaling,
/// regardless of the amount of proofs verified. All variable points (the commitments) are
/// accumulated with the fixed points into a single multiscalar multiplication.
#[derive(Default)]
pub struct BatchVerifier {
pub(crate) original: BulletproofsBatchVerifier,
pub(crate) plus: BulletproofsPlusBatchVerifier,
}
impl BatchVerifier {
/// Create a new batch verifier.
pub fn new() -> Self {
Self {
original: BulletproofsBatchVerifier(InternalBatchVerifier::new()),
plus: BulletproofsPlusBatchVerifier(InternalBatchVerifier::new()),
original: BulletproofsBatchVerifier(InternalBatchVerifier::default()),
plus: BulletproofsPlusBatchVerifier(InternalBatchVerifier::default()),
}
}
/// Verify all of the proofs queued within this batch verifier.
///
/// This uses a variable-time multiscalar multiplication internally.
#[must_use]
pub fn verify(self) -> bool {
self.original.verify() && self.plus.verify()
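A hedged usage sketch of this API (the crate path, RNG choice, and the `proofs` input are placeholders):
use rand_core::OsRng;
use curve25519_dalek::edwards::EdwardsPoint;
use monero_bulletproofs::{Bulletproof, BatchVerifier};
// Queue every proof, then perform the single multiscalar multiplication at the end.
fn verify_all(proofs: &[(Bulletproof, Vec<EdwardsPoint>)]) -> bool {
let mut verifier = BatchVerifier::new();
for (proof, commitments) in proofs {
// batch_verify only queues the statement; false means the proof wasn't sane.
if !proof.batch_verify(&mut OsRng, &mut verifier, commitments) {
return false;
}
}
verifier.verify()
}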

View File

@@ -1,25 +1,12 @@
use std_shims::{vec, vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use subtle::{Choice, ConditionallySelectable};
use std_shims::{vec, vec::Vec};
use curve25519_dalek::{
constants::ED25519_BASEPOINT_TABLE,
traits::{MultiscalarMul, VartimeMultiscalarMul},
scalar::Scalar,
edwards::EdwardsPoint,
};
pub(crate) use monero_generators::Generators;
use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};
pub(crate) use crate::scalar_vector::*;
// Components common between variants
// TODO: Move to generators? primitives?
pub(crate) const MAX_M: usize = 16;
pub(crate) const LOG_N: usize = 6; // 2 << 6 == N
pub(crate) const N: usize = 64;
pub(crate) use monero_generators::{MAX_COMMITMENTS, COMMITMENT_BITS, LOG_COMMITMENT_BITS};
pub(crate) fn multiexp(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
let mut buf_scalars = Vec::with_capacity(pairs.len());
@@ -41,113 +28,47 @@ pub(crate) fn multiexp_vartime(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint
EdwardsPoint::vartime_multiscalar_mul(buf_scalars, buf_points)
}
pub(crate) fn vector_exponent(
generators: &Generators,
a: &ScalarVector,
b: &ScalarVector,
) -> EdwardsPoint {
debug_assert_eq!(a.len(), b.len());
(a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
}
pub(crate) fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
let slice =
&[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
.concat();
*cache = keccak256_to_scalar(slice);
*cache
}
pub(crate) fn MN(outputs: usize) -> (usize, usize, usize) {
let mut logM = 0;
let mut M;
while {
M = 1 << logM;
(M <= MAX_M) && (M < outputs)
} {
logM += 1;
}
(logM + LOG_N, M, M * N)
}
pub(crate) fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
let (_, M, MN) = MN(commitments.len());
let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
let mut aL = ScalarVector::new(MN);
let mut aR = ScalarVector::new(MN);
for j in 0 .. M {
for i in (0 .. N).rev() {
let bit =
if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
aL.0[(j * N) + i] = Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
aR.0[(j * N) + i] = Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
}
}
(aL, aR)
}
pub(crate) fn hash_commitments<C: IntoIterator<Item = EdwardsPoint>>(
commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
let V = commitments.into_iter().map(|c| c * INV_EIGHT()).collect::<Vec<_>>();
(keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}
pub(crate) fn alpha_rho<R: RngCore + CryptoRng>(
rng: &mut R,
generators: &Generators,
aL: &ScalarVector,
aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
let ar = Scalar::random(rng);
(ar, (vector_exponent(generators, aL, aR) + (ED25519_BASEPOINT_TABLE * &ar)) * INV_EIGHT())
}
pub(crate) fn LR_statements(
a: &ScalarVector,
G_i: &[EdwardsPoint],
b: &ScalarVector,
H_i: &[EdwardsPoint],
cL: Scalar,
U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
let mut res = a
.0
.iter()
.copied()
.zip(G_i.iter().copied())
.chain(b.0.iter().copied().zip(H_i.iter().copied()))
.collect::<Vec<_>>();
res.push((cL, U));
res
}
static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
pub(crate) fn TWO_N() -> &'static ScalarVector {
TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), N))
}
pub(crate) fn challenge_products(w: &[Scalar], winv: &[Scalar]) -> Vec<Scalar> {
let mut products = vec![Scalar::ZERO; 1 << w.len()];
products[0] = winv[0];
products[1] = w[0];
for j in 1 .. w.len() {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * w[j];
products[slots - 1] = products[slots / 2] * winv[j];
slots = slots.saturating_sub(2);
}
}
// Sanity check as if the above failed to populate, it'd be critical
for w in &products {
debug_assert!(*w != Scalar::ZERO);
}
products
}
/*
This has room for optimization worth investigating further. It currently takes
an iterative approach. It can be optimized further via divide and conquer.
Assume there are 4 challenges.
Iterative approach (current):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across that result and column 2.
3. Do the optimal multiplications across that result and column 3.
Divide and conquer (worth investigating further):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across challenge column 2 and 3.
3. Multiply both results together.
When there are 4 challenges (n=16), the iterative approach does 28 multiplications
versus divide and conquer's 24.
*/
pub(crate) fn challenge_products(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
let mut products = vec![Scalar::ONE; 1 << challenges.len()];
if !challenges.is_empty() {
products[0] = challenges[0].1;
products[1] = challenges[0].0;
for (j, challenge) in challenges.iter().enumerate().skip(1) {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * challenge.0;
products[slots - 1] = products[slots / 2] * challenge.1;
slots = slots.saturating_sub(2);
}
}
// Sanity check since if the above failed to populate, it'd be critical
for product in &products {
debug_assert!(*product != Scalar::ZERO);
}
}
products
}
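To make the table concrete: entry i of the result is the product, over every challenge j, of challenge.0 when bit (n - 1 - j) of i is set and challenge.1 (its inverse) otherwise. A naive cross-check of that claim (illustrative, not part of the diff):
use curve25519_dalek::scalar::Scalar;
fn challenge_products_naive(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
let n = challenges.len();
(0 .. (1usize << n))
.map(|i| {
let mut product = Scalar::ONE;
for (j, (w, w_inv)) in challenges.iter().enumerate() {
// Bit (n - 1 - j) selects the challenge, mirroring the halving order of the folds.
product *= if ((i >> (n - 1 - j)) & 1) == 1 { w } else { w_inv };
}
product
})
.collect()
}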

View File

@@ -4,7 +4,8 @@
#![allow(non_snake_case)]
use std_shims::{
vec, vec::Vec,
vec,
vec::Vec,
io::{self, Read, Write},
};
@@ -14,14 +15,16 @@ use zeroize::Zeroizing;
use curve25519_dalek::edwards::EdwardsPoint;
use monero_io::*;
pub use monero_generators::MAX_COMMITMENTS;
use monero_primitives::Commitment;
pub(crate) mod scalar_vector;
pub(crate) mod core;
use crate::core::LOG_N;
use crate::core::LOG_COMMITMENT_BITS;
pub mod batch_verifier;
use batch_verifier::{InternalBatchVerifier, BulletproofsPlusBatchVerifier, BatchVerifier};
pub(crate) mod batch_verifier;
use batch_verifier::{BulletproofsBatchVerifier, BulletproofsPlusBatchVerifier};
pub use batch_verifier::BatchVerifier;
pub(crate) mod original;
use crate::original::OriginalStruct;
@@ -32,18 +35,21 @@ use crate::plus::*;
#[cfg(test)]
mod tests;
pub const MAX_COMMITMENTS: usize = crate::core::MAX_M;
/// An error from proving/verifying Bulletproofs(+).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum BulletproofError {
/// Proving/verifying a Bulletproof(+) range proof with no commitments.
#[cfg_attr(feature = "std", error("no commitments to prove the range for"))]
NoCommitments,
/// Proving/verifying a Bulletproof(+) range proof with more commitments than supported.
#[cfg_attr(feature = "std", error("too many commitments to prove the range for"))]
TooManyCommitments,
}
/// Bulletproof enum, encapsulating both Bulletproofs and Bulletproofs+.
/// A Bulletproof(+).
///
/// This encapsulates either a Bulletproof or a Bulletproof+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproof {
@@ -60,6 +66,11 @@ impl Bulletproof {
}
}
/// Calculate the weight penalty for the Bulletproof(+).
///
/// Bulletproofs(+) are logarithmically sized yet linearly timed. Evaluating them by size alone
/// accordingly doesn't properly represent the burden of the proof. To compensate, Monero 'claws
/// back' some of the weight the proof's compact size would otherwise save.
// https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
// src/cryptonote_basic/cryptonote_format_utils.cpp#L106-L124
pub fn calculate_bp_clawback(plus: bool, n_outputs: usize) -> (usize, usize) {
@@ -70,12 +81,12 @@ impl Bulletproof {
LR_len += 1;
n_padded_outputs = 1 << LR_len;
}
LR_len += LOG_N;
LR_len += LOG_COMMITMENT_BITS;
let mut bp_clawback = 0;
if n_padded_outputs > 2 {
let fields = Bulletproof::bp_fields(plus);
let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
let base = ((fields + (2 * (LOG_COMMITMENT_BITS + 1))) * 32) / 2;
let size = (fields + (2 * LR_len)) * 32;
bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
}
@@ -83,6 +94,7 @@ impl Bulletproof {
(bp_clawback, LR_len)
}
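A worked instance of the formula (hedged: this assumes bp_fields returns 6 for a Bulletproof+, per its A, A1, B, r1, s1, d1 fields):
// Bulletproof+ over 3 outputs: 3 pads to 4, so LR_len = 2 + LOG_COMMITMENT_BITS = 8.
// base = ((6 + 2 * (6 + 1)) * 32) / 2 = 320
// size = (6 + 2 * 8) * 32 = 704
// bp_clawback = ((320 * 4) - 704) * 4 / 5 = 460 (integer division)
let (bp_clawback, lr_len) = Bulletproof::calculate_bp_clawback(true, 3);
debug_assert_eq!((bp_clawback, lr_len), (460, 8)); // under the bp_fields assumption above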
/// Calculate the weight of this proof.
pub fn fee_weight(plus: bool, outputs: usize) -> usize {
#[allow(non_snake_case)]
let (bp_clawback, LR_len) = Bulletproof::calculate_bp_clawback(plus, outputs);
@@ -126,9 +138,15 @@ impl Bulletproof {
#[must_use]
pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
match self {
Bulletproof::Original(bp) => bp.verify(rng, commitments),
Bulletproof::Original(bp) => {
let mut verifier = BulletproofsBatchVerifier::default();
if !bp.verify(rng, &mut verifier, commitments) {
return false;
}
verifier.verify()
}
Bulletproof::Plus(bp) => {
let mut verifier = BulletproofsPlusBatchVerifier(InternalBatchVerifier::new());
let mut verifier = BulletproofsPlusBatchVerifier::default();
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
};
@@ -140,11 +158,14 @@ impl Bulletproof {
}
}
/// Accumulate the verification for the given Bulletproof into the specified BatchVerifier.
/// Accumulate the verification for the given Bulletproof(+) into the specified BatchVerifier.
///
/// Returns false if the Bulletproof isn't sane, leaving the BatchVerifier in an undefined
/// Returns false if the Bulletproof(+) isn't sane, leaving the BatchVerifier in an undefined
/// state.
/// Returns true if the Bulletproof is sane, regardless of their validity.
///
/// Returns true if the Bulletproof(+) is sane, regardless of its validity.
///
/// The BatchVerifier must have its verification function executed to actually verify this proof.
#[must_use]
pub fn batch_verify<R: RngCore + CryptoRng>(
&self,
@@ -153,7 +174,7 @@ impl Bulletproof {
commitments: &[EdwardsPoint],
) -> bool {
match self {
Bulletproof::Original(bp) => bp.batch_verify(rng, &mut verifier.original, commitments),
Bulletproof::Original(bp) => bp.verify(rng, &mut verifier.original, commitments),
Bulletproof::Plus(bp) => {
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
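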
@@ -196,16 +217,19 @@ impl Bulletproof {
}
}
/// Write a Bulletproof(+) for the message signed by a transaction's signature.
///
/// This has a distinct encoding from the standard encoding.
pub fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
}
/// Write the Bulletproof(+) to a writer.
/// Write a Bulletproof(+).
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_vec(write_point, points, w))
}
/// Serialize the Bulletproof(+) to a `Vec<u8>`.
/// Serialize a Bulletproof(+) to a `Vec<u8>`.
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
self.write(&mut serialized).unwrap();

View File

@@ -2,25 +2,115 @@ use std_shims::{vec, vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use subtle::{Choice, ConditionallySelectable};
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar, edwards::EdwardsPoint};
use curve25519_dalek::{
constants::{ED25519_BASEPOINT_POINT, ED25519_BASEPOINT_TABLE},
scalar::Scalar,
edwards::EdwardsPoint,
};
use monero_generators::H;
use monero_generators::{H, Generators};
use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};
use crate::{
core::*,
batch_verifier::{InternalBatchVerifier, BulletproofsBatchVerifier},
};
use crate::{core::*, ScalarVector, batch_verifier::BulletproofsBatchVerifier};
include!(concat!(env!("OUT_DIR"), "/generators.rs"));
static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
*IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}
static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
fn TWO_N() -> &'static ScalarVector {
TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS))
}
static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
fn IP12() -> Scalar {
*IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; COMMITMENT_BITS]).inner_product(TWO_N()))
}
fn MN(outputs: usize) -> (usize, usize, usize) {
let mut logM = 0;
let mut M;
while {
M = 1 << logM;
(M <= MAX_COMMITMENTS) && (M < outputs)
} {
logM += 1;
}
(logM + LOG_COMMITMENT_BITS, M, M * COMMITMENT_BITS)
}
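For example (illustrative): with 3 outputs, M grows 1, 2, 4 and stops at the first power of two covering them.
// MN(3) == (2 + LOG_COMMITMENT_BITS, 4, 4 * COMMITMENT_BITS) == (8, 4, 256)
debug_assert_eq!(MN(3), (8, 4, 256));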
fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
let (_, M, MN) = MN(commitments.len());
let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
let mut aL = ScalarVector::new(MN);
let mut aR = ScalarVector::new(MN);
for j in 0 .. M {
for i in (0 .. COMMITMENT_BITS).rev() {
let bit =
if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
aL.0[(j * COMMITMENT_BITS) + i] =
Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
aR.0[(j * COMMITMENT_BITS) + i] =
Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
}
}
(aL, aR)
}
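The decomposition above sets aL to each amount's 64 bits and aR to aL - 1, so their Hadamard product is zero and the bits weighted by powers of two recover the amount. A standalone illustration over plain integers (not Scalars, and not part of the diff):
let amount: u64 = 5; // 0b101
let a_l: Vec<i8> = (0 .. 64).map(|i| ((amount >> i) & 1) as i8).collect();
let a_r: Vec<i8> = a_l.iter().map(|bit| bit - 1).collect();
// The bits reconstruct the amount, and every entry-wise product aL[i] * aR[i] is zero.
assert_eq!((0 .. 64).map(|i| u64::from(a_l[i] as u8) << i).sum::<u64>(), amount);
assert!(a_l.iter().zip(&a_r).all(|(l, r)| l * r == 0));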
fn hash_commitments<C: IntoIterator<Item = EdwardsPoint>>(
commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
let V = commitments.into_iter().map(|c| c * INV_EIGHT()).collect::<Vec<_>>();
(keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}
fn alpha_rho<R: RngCore + CryptoRng>(
rng: &mut R,
generators: &Generators,
aL: &ScalarVector,
aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
fn vector_exponent(generators: &Generators, a: &ScalarVector, b: &ScalarVector) -> EdwardsPoint {
debug_assert_eq!(a.len(), b.len());
(a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
}
let ar = Scalar::random(rng);
(ar, (vector_exponent(generators, aL, aR) + (ED25519_BASEPOINT_TABLE * &ar)) * INV_EIGHT())
}
fn LR_statements(
a: &ScalarVector,
G_i: &[EdwardsPoint],
b: &ScalarVector,
H_i: &[EdwardsPoint],
cL: Scalar,
U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
let mut res = a
.0
.iter()
.copied()
.zip(G_i.iter().copied())
.chain(b.0.iter().copied().zip(H_i.iter().copied()))
.collect::<Vec<_>>();
res.push((cL, U));
res
}
fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
let slice =
&[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
.concat();
*cache = keccak256_to_scalar(slice);
*cache
}
pub(crate) fn hadamard_fold(
fn hadamard_fold(
l: &[EdwardsPoint],
r: &[EdwardsPoint],
a: Scalar,
@@ -33,7 +123,8 @@ pub(crate) fn hadamard_fold(
res
}
/// Internal structure representing a Bulletproof.
/// Internal structure representing a Bulletproof, as defined by Monero.
#[doc(hidden)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
pub(crate) A: EdwardsPoint,
@@ -77,7 +168,7 @@ impl OriginalStruct {
let mut zero_twos = Vec::with_capacity(MN);
let zpow = ScalarVector::powers(z, M + 2);
for j in 0 .. M {
for i in 0 .. N {
for i in 0 .. COMMITMENT_BITS {
zero_twos.push(zpow[j + 2] * TWO_N()[i]);
}
}
@@ -152,31 +243,36 @@ impl OriginalStruct {
R.push(R_i);
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
let winv = w.invert();
let w_inv = w.invert();
a = (aL * w) + &(aR * winv);
b = (bL * winv) + &(bR * w);
a = (aL * w) + &(aR * w_inv);
b = (bL * w_inv) + &(bR * w);
if a.len() != 1 {
G_proof = hadamard_fold(G_L, G_R, winv, w);
H_proof = hadamard_fold(H_L, H_R, w, winv);
G_proof = hadamard_fold(G_L, G_R, w_inv, w);
H_proof = hadamard_fold(H_L, H_R, w, w_inv);
}
}
let res = OriginalStruct { A, S, T1, T2, tau_x, mu, L, R, a: a[0], b: b[0], t };
debug_assert!(res.verify(rng, &commitments_points));
#[cfg(debug_assertions)]
let mut verifier = BulletproofsBatchVerifier::default();
debug_assert!(res.verify(rng, &mut verifier, &commitments_points));
debug_assert!(verifier.verify());
res
}
#[must_use]
fn verify_core<R: RngCore + CryptoRng>(
pub(crate) fn verify<R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BulletproofsBatchVerifier,
commitments: &[EdwardsPoint],
) -> bool {
// Verify commitments are valid
if commitments.is_empty() || (commitments.len() > MAX_M) {
if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
return false;
}
@@ -207,11 +303,11 @@ impl OriginalStruct {
&[x.to_bytes(), self.tau_x.to_bytes(), self.mu.to_bytes(), self.t.to_bytes()],
);
let mut w = Vec::with_capacity(logMN);
let mut winv = Vec::with_capacity(logMN);
let mut w_and_w_inv = Vec::with_capacity(logMN);
for (L, R) in self.L.iter().zip(&self.R) {
w.push(hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]));
winv.push(cache.invert());
let w = hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]);
let w_inv = w.invert();
w_and_w_inv.push((w, w_inv));
}
// Convert the proof from * INV_EIGHT to its actual form
@@ -233,7 +329,7 @@ impl OriginalStruct {
{
let verifier_weight = Scalar::random(rng);
let ip1y = ScalarVector::powers(y, M * N).sum();
let ip1y = ScalarVector::powers(y, M * COMMITMENT_BITS).sum();
let mut k = -(zpow[2] * ip1y);
for j in 1 ..= M {
k -= zpow[j + 2] * IP12();
@@ -266,7 +362,7 @@ impl OriginalStruct {
let yinv = y.invert();
let yinvpow = ScalarVector::powers(yinv, MN);
let w_cache = challenge_products(&w, &winv);
let w_cache = challenge_products(&w_and_w_inv);
while verifier.0.g_bold.len() < MN {
verifier.0.g_bold.push(Scalar::ZERO);
@@ -280,41 +376,18 @@ impl OriginalStruct {
verifier.0.g_bold[i] -= verifier_weight * g;
let mut h = self.b * yinvpow[i] * w_cache[(!i) & (MN - 1)];
h -= ((zpow[(i / N) + 2] * TWO_N()[i % N]) + (z * ypow[i])) * yinvpow[i];
h -= ((zpow[(i / COMMITMENT_BITS) + 2] * TWO_N()[i % COMMITMENT_BITS]) + (z * ypow[i])) *
yinvpow[i];
verifier.0.h_bold[i] -= verifier_weight * h;
}
}
for i in 0 .. logMN {
verifier.0.other.push((verifier_weight * (w[i] * w[i]), L[i]));
verifier.0.other.push((verifier_weight * (winv[i] * winv[i]), R[i]));
verifier.0.other.push((verifier_weight * (w_and_w_inv[i].0 * w_and_w_inv[i].0), L[i]));
verifier.0.other.push((verifier_weight * (w_and_w_inv[i].1 * w_and_w_inv[i].1), R[i]));
}
}
true
}
#[must_use]
pub(crate) fn verify<R: RngCore + CryptoRng>(
&self,
rng: &mut R,
commitments: &[EdwardsPoint],
) -> bool {
let mut verifier = BulletproofsBatchVerifier(InternalBatchVerifier::new());
if self.verify_core(rng, &mut verifier, commitments) {
verifier.verify()
} else {
false
}
}
#[must_use]
pub(crate) fn batch_verify<R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BulletproofsBatchVerifier,
commitments: &[EdwardsPoint],
) -> bool {
self.verify_core(rng, verifier, commitments)
}
}

View File

@@ -9,7 +9,7 @@ use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};
use crate::{
batch_verifier::BulletproofsPlusBatchVerifier,
core::{MAX_M, N, multiexp, multiexp_vartime},
core::{MAX_COMMITMENTS, COMMITMENT_BITS, multiexp, multiexp_vartime},
plus::{
ScalarVector, PointVector, GeneratorsList, BpPlusGenerators,
transcript::*,
@@ -36,7 +36,7 @@ pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
impl AggregateRangeWitness {
pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
if commitments.is_empty() || (commitments.len() > MAX_M) {
if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
return None;
}
@@ -44,7 +44,8 @@ impl AggregateRangeWitness {
}
}
/// Internal structure representing a Bulletproof+, as used in Monero.
/// Internal structure representing a Bulletproof+, as defined by Monero.
#[doc(hidden)]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
pub(crate) A: EdwardsPoint,
@@ -62,7 +63,7 @@ struct AHatComputation {
impl AggregateRangeStatement {
pub(crate) fn new(V: Vec<EdwardsPoint>) -> Option<Self> {
if V.is_empty() || (V.len() > MAX_M) {
if V.is_empty() || (V.len() > MAX_COMMITMENTS) {
return None;
}
@@ -79,12 +80,12 @@ impl AggregateRangeStatement {
}
fn d_j(j: usize, m: usize) -> ScalarVector {
let mut d_j = Vec::with_capacity(m * N);
for _ in 0 .. (j - 1) * N {
let mut d_j = Vec::with_capacity(m * COMMITMENT_BITS);
for _ in 0 .. (j - 1) * COMMITMENT_BITS {
d_j.push(Scalar::ZERO);
}
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), N).0);
for _ in 0 .. (m - j) * N {
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS).0);
for _ in 0 .. (m - j) * COMMITMENT_BITS {
d_j.push(Scalar::ZERO);
}
ScalarVector(d_j)
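Concretely (an illustrative, within-module sketch): d_j places the 64 powers of two in the j-th block of an m * 64 vector, and zeros elsewhere.
let d_2 = Self::d_j(2, 3);
debug_assert_eq!(d_2.0.len(), 3 * COMMITMENT_BITS);
debug_assert_eq!(d_2.0[0], Scalar::ZERO); // first commitment's block: zeros
debug_assert_eq!(d_2.0[COMMITMENT_BITS], Scalar::ONE); // second block starts with 2^0
debug_assert_eq!(d_2.0[COMMITMENT_BITS + 1], Scalar::from(2u8)); // then 2^1, 2^2, ...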
@@ -102,7 +103,7 @@ impl AggregateRangeStatement {
while V.len() < padded_pow_of_2(V.len()) {
V.0.push(EdwardsPoint::identity());
}
let mn = V.len() * N;
let mn = V.len() * COMMITMENT_BITS;
// 2, 4, 6, 8... powers of z, of length equivalent to the amount of commitments
let mut z_pow = Vec::with_capacity(V.len());
@@ -188,10 +189,10 @@ impl AggregateRangeStatement {
V.push(EdwardsPoint::identity());
}
let generators = generators.reduce(V.len() * N);
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
let mut d_js = Vec::with_capacity(V.len());
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * COMMITMENT_BITS));
for j in 1 ..= V.len() {
d_js.push(Self::d_j(j, V.len()));
#[allow(clippy::map_unwrap_or)]
@@ -251,7 +252,7 @@ impl AggregateRangeStatement {
let mut transcript = initial_transcript(V.iter());
let V = V.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
let generators = generators.reduce(V.len() * N);
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
let AHatComputation { y, A_hat, .. } =
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, proof.A);

View File

@@ -7,7 +7,7 @@ use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use monero_primitives::{INV_EIGHT, keccak256_to_scalar};
use crate::{
core::{multiexp, multiexp_vartime},
core::{multiexp, multiexp_vartime, challenge_products},
batch_verifier::BulletproofsPlusBatchVerifier,
plus::{ScalarVector, PointVector, GeneratorsList, BpPlusGenerators, padded_pow_of_2},
};
@@ -146,51 +146,6 @@ impl WipStatement {
(e, inv_e, e_square, inv_e_square, PointVector(new_g_bold), PointVector(new_h_bold))
}
/*
This has room for optimization worth investigating further. It currently takes
an iterative approach. It can be optimized further via divide and conquer.
Assume there are 4 challenges.
Iterative approach (current):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across that result and column 2.
3. Do the optimal multiplications across that result and column 3.
Divide and conquer (worth investigating further):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across challenge column 2 and 3.
3. Multiply both results together.
When there are 4 challenges (n=16), the iterative approach does 28 multiplications
versus divide and conquer's 24.
*/
fn challenge_products(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
let mut products = vec![Scalar::ONE; 1 << challenges.len()];
if !challenges.is_empty() {
products[0] = challenges[0].1;
products[1] = challenges[0].0;
for (j, challenge) in challenges.iter().enumerate().skip(1) {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * challenge.0;
products[slots - 1] = products[slots / 2] * challenge.1;
slots = slots.saturating_sub(2);
}
}
// Sanity check since if the above failed to populate, it'd be critical
for product in &products {
debug_assert!(*product != Scalar::ZERO);
}
}
products
}
pub(crate) fn prove<R: RngCore + CryptoRng>(
self,
rng: &mut R,
@@ -409,7 +364,7 @@ impl WipStatement {
verifier.0.other.push((neg_e_square * inv_e_i_square, *R));
}
Self::challenge_products(&challenges)
challenge_products(&challenges)
};
while verifier.0.g_bold.len() < generators.len() {

View File

@@ -5,7 +5,8 @@
use core::ops::Deref;
use std_shims::{
vec, vec::Vec,
vec,
vec::Vec,
io::{self, Read, Write},
};

View File

@@ -9,7 +9,7 @@ mod binaries {
pub(crate) use monero_serai::{
Commitment,
ringct::{RctPrunable, bulletproofs::batch_verifier::BatchVerifier},
ringct::{RctPrunable, bulletproofs::BatchVerifier},
transaction::{Input, Transaction},
block::Block,
rpc::{RpcError, Rpc, HttpRpc},