Smash out monero-bulletproofs

Removes usage of dalek-ff-group/multiexp in favor of curve25519-dalek.

Makes compiling in the generators an optional feature.

Adds a structured batch verifier which should be notably more performant.

Documentation and cleanup are still necessary.
Luke Parker
2024-06-15 17:00:11 -04:00
parent 798ffc9b28
commit f7c13fd1ca
32 changed files with 716 additions and 502 deletions
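In consumer code, the batch verifier's call pattern changes roughly as follows. A minimal sketch assembled from the hunks below (the bulletproofs and commitments bindings are illustrative):

// Before: multiexp::BatchVerifier, pre-sized, keyed by a per-statement ID, verified in variable time
let mut batch = BatchVerifier::new(block.txs.len());
assert!(bulletproofs.batch_verify(&mut rand_core::OsRng, &mut batch, (), &commitments));
assert!(batch.verify_vartime());

// After: the structured monero_serai::ringct::bulletproofs::batch_verifier::BatchVerifier,
// with no capacity hint and no per-statement ID
let mut batch = BatchVerifier::new();
assert!(bulletproofs.batch_verify(&mut rand_core::OsRng, &mut batch, &commitments));
assert!(batch.verify());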

View File

@@ -4,14 +4,12 @@ mod binaries {
pub(crate) use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
pub(crate) use multiexp::BatchVerifier;
pub(crate) use serde::Deserialize;
pub(crate) use serde_json::json;
pub(crate) use monero_serai::{
Commitment,
ringct::RctPrunable,
ringct::{RctPrunable, bulletproofs::batch_verifier::BatchVerifier},
transaction::{Input, Transaction},
block::Block,
rpc::{RpcError, Rpc, HttpRpc},
@@ -95,7 +93,7 @@ mod binaries {
all_txs.extend(txs.txs);
}
let mut batch = BatchVerifier::new(block.txs.len());
let mut batch = BatchVerifier::new();
for (tx_hash, tx_res) in block.txs.into_iter().zip(all_txs) {
assert_eq!(
tx_res.tx_hash,
@@ -135,7 +133,6 @@ mod binaries {
assert!(bulletproofs.batch_verify(
&mut rand_core::OsRng,
&mut batch,
(),
&tx.rct_signatures.base.commitments
));
}
@@ -143,7 +140,6 @@ mod binaries {
assert!(bulletproofs.batch_verify(
&mut rand_core::OsRng,
&mut batch,
(),
&tx.rct_signatures.base.commitments
));
@@ -234,7 +230,7 @@ mod binaries {
}
}
}
assert!(batch.verify_vartime());
assert!(batch.verify());
}
println!("Deserialized, hashed, and reserialized {block_i} with {txs_len} TXs");

View File

@@ -1,151 +0,0 @@
use std_shims::{vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use subtle::{Choice, ConditionallySelectable};
use curve25519_dalek::edwards::EdwardsPoint as DalekPoint;
use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, EdwardsPoint};
use multiexp::multiexp as multiexp_const;
pub(crate) use monero_generators::Generators;
use crate::{INV_EIGHT as DALEK_INV_EIGHT, H as DALEK_H, Commitment, hash_to_scalar as dalek_hash};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::*;
#[inline]
pub(crate) fn INV_EIGHT() -> Scalar {
Scalar(DALEK_INV_EIGHT())
}
#[inline]
pub(crate) fn H() -> EdwardsPoint {
EdwardsPoint(DALEK_H())
}
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar(dalek_hash(data))
}
// Components common between variants
pub(crate) const MAX_M: usize = 16;
pub(crate) const LOG_N: usize = 6; // 1 << 6 == N
pub(crate) const N: usize = 64;
pub(crate) fn prove_multiexp(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
multiexp_const(pairs) * INV_EIGHT()
}
pub(crate) fn vector_exponent(
generators: &Generators,
a: &ScalarVector,
b: &ScalarVector,
) -> EdwardsPoint {
debug_assert_eq!(a.len(), b.len());
(a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
}
pub(crate) fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
let slice =
&[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
.concat();
*cache = hash_to_scalar(slice);
*cache
}
pub(crate) fn MN(outputs: usize) -> (usize, usize, usize) {
let mut logM = 0;
let mut M;
while {
M = 1 << logM;
(M <= MAX_M) && (M < outputs)
} {
logM += 1;
}
(logM + LOG_N, M, M * N)
}
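// e.g. MN(3) == (8, 4, 256): 3 outputs pad M up to 4 (logM == 2), and logMN == logM + LOG_N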
pub(crate) fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
let (_, M, MN) = MN(commitments.len());
let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
let mut aL = ScalarVector::new(MN);
let mut aR = ScalarVector::new(MN);
for j in 0 .. M {
for i in (0 .. N).rev() {
let bit =
if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
aL.0[(j * N) + i] = Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
aR.0[(j * N) + i] = Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
}
}
(aL, aR)
}
pub(crate) fn hash_commitments<C: IntoIterator<Item = DalekPoint>>(
commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
let V = commitments.into_iter().map(|c| EdwardsPoint(c) * INV_EIGHT()).collect::<Vec<_>>();
(hash_to_scalar(&V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}
pub(crate) fn alpha_rho<R: RngCore + CryptoRng>(
rng: &mut R,
generators: &Generators,
aL: &ScalarVector,
aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
let ar = Scalar::random(rng);
(ar, (vector_exponent(generators, aL, aR) + (EdwardsPoint::generator() * ar)) * INV_EIGHT())
}
pub(crate) fn LR_statements(
a: &ScalarVector,
G_i: &[EdwardsPoint],
b: &ScalarVector,
H_i: &[EdwardsPoint],
cL: Scalar,
U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
let mut res = a
.0
.iter()
.copied()
.zip(G_i.iter().copied())
.chain(b.0.iter().copied().zip(H_i.iter().copied()))
.collect::<Vec<_>>();
res.push((cL, U));
res
}
static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
pub(crate) fn TWO_N() -> &'static ScalarVector {
TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), N))
}
pub(crate) fn challenge_products(w: &[Scalar], winv: &[Scalar]) -> Vec<Scalar> {
let mut products = vec![Scalar::ZERO; 1 << w.len()];
products[0] = winv[0];
products[1] = w[0];
for j in 1 .. w.len() {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * w[j];
products[slots - 1] = products[slots / 2] * winv[j];
slots = slots.saturating_sub(2);
}
}
// Sanity check, as it'd be critical if the above failed to populate
for w in &products {
debug_assert!(!bool::from(w.is_zero()));
}
products
}
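// e.g. for two challenges, products == [winv[0] * winv[1], winv[0] * w[1], w[0] * winv[1], w[0] * w[1]]:
// index i selects w or winv for each challenge via its bits, most significant bit first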

View File

@@ -1,233 +0,0 @@
#![allow(non_snake_case)]
use std_shims::{
vec::Vec,
io::{self, Read, Write},
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing};
use curve25519_dalek::edwards::EdwardsPoint;
use multiexp::BatchVerifier;
use crate::{Commitment, wallet::TransactionError, serialize::*};
pub(crate) mod scalar_vector;
pub(crate) mod core;
use self::core::LOG_N;
pub(crate) mod original;
use self::original::OriginalStruct;
pub(crate) mod plus;
use self::plus::*;
pub(crate) const MAX_OUTPUTS: usize = self::core::MAX_M;
/// Bulletproof enum, encapsulating both Bulletproofs and Bulletproofs+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproof {
Original(OriginalStruct),
Plus(AggregateRangeProof),
}
impl Bulletproof {
fn bp_fields(plus: bool) -> usize {
if plus {
6
} else {
9
}
}
// https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
// src/cryptonote_basic/cryptonote_format_utils.cpp#L106-L124
pub(crate) fn calculate_bp_clawback(plus: bool, n_outputs: usize) -> (usize, usize) {
#[allow(non_snake_case)]
let mut LR_len = 0;
let mut n_padded_outputs = 1;
while n_padded_outputs < n_outputs {
LR_len += 1;
n_padded_outputs = 1 << LR_len;
}
LR_len += LOG_N;
let mut bp_clawback = 0;
if n_padded_outputs > 2 {
let fields = Bulletproof::bp_fields(plus);
let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
let size = (fields + (2 * LR_len)) * 32;
bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
}
(bp_clawback, LR_len)
}
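// e.g. calculate_bp_clawback(false, 3): padding yields 4 outputs and LR_len == 2 + LOG_N == 8,
// so base == ((9 + 14) * 32) / 2 == 368, size == (9 + 16) * 32 == 800, and
// bp_clawback == ((368 * 4) - 800) * 4 / 5 == 537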
pub(crate) fn fee_weight(plus: bool, outputs: usize) -> usize {
#[allow(non_snake_case)]
let (bp_clawback, LR_len) = Bulletproof::calculate_bp_clawback(plus, outputs);
32 * (Bulletproof::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
}
/// Prove each commitment in the list is within [0 .. 2^64) with an aggregate Bulletproof.
pub fn prove<R: RngCore + CryptoRng>(
rng: &mut R,
outputs: &[Commitment],
) -> Result<Bulletproof, TransactionError> {
if outputs.is_empty() {
Err(TransactionError::NoOutputs)?;
}
if outputs.len() > MAX_OUTPUTS {
Err(TransactionError::TooManyOutputs)?;
}
Ok(Bulletproof::Original(OriginalStruct::prove(rng, outputs)))
}
/// Prove each commitment in the list is within [0 .. 2^64) with an aggregate Bulletproof+.
pub fn prove_plus<R: RngCore + CryptoRng>(
rng: &mut R,
outputs: Vec<Commitment>,
) -> Result<Bulletproof, TransactionError> {
if outputs.is_empty() {
Err(TransactionError::NoOutputs)?;
}
if outputs.len() > MAX_OUTPUTS {
Err(TransactionError::TooManyOutputs)?;
}
Ok(Bulletproof::Plus(
AggregateRangeStatement::new(outputs.iter().map(Commitment::calculate).collect())
.unwrap()
.prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
.unwrap(),
))
}
/// Verify the given Bulletproof(+).
#[must_use]
pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
match self {
Bulletproof::Original(bp) => bp.verify(rng, commitments),
Bulletproof::Plus(bp) => {
let mut verifier = BatchVerifier::new(1);
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
};
if !statement.verify(rng, &mut verifier, (), bp.clone()) {
return false;
}
verifier.verify_vartime()
}
}
}
/// Accumulate the verification for the given Bulletproof into the specified BatchVerifier.
///
/// Returns false if the Bulletproof isn't sane, leaving the BatchVerifier in an undefined
/// state.
/// Returns true if the Bulletproof is sane, regardless of its validity.
#[must_use]
pub fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, dalek_ff_group::EdwardsPoint>,
id: ID,
commitments: &[EdwardsPoint],
) -> bool {
match self {
Bulletproof::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
Bulletproof::Plus(bp) => {
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
};
statement.verify(rng, verifier, id, bp.clone())
}
}
}
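// A typical usage pattern, mirroring the tests further down in this diff: queue every proof into
// one verifier, then perform a single batch verification, e.g.
//   assert!(bp.batch_verify(&mut OsRng, &mut verifier, id, &commitments));
//   ...
//   assert!(verifier.verify_vartime());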
fn write_core<W: Write, F: Fn(&[EdwardsPoint], &mut W) -> io::Result<()>>(
&self,
w: &mut W,
specific_write_vec: F,
) -> io::Result<()> {
match self {
Bulletproof::Original(bp) => {
write_point(&bp.A, w)?;
write_point(&bp.S, w)?;
write_point(&bp.T1, w)?;
write_point(&bp.T2, w)?;
write_scalar(&bp.taux, w)?;
write_scalar(&bp.mu, w)?;
specific_write_vec(&bp.L, w)?;
specific_write_vec(&bp.R, w)?;
write_scalar(&bp.a, w)?;
write_scalar(&bp.b, w)?;
write_scalar(&bp.t, w)
}
Bulletproof::Plus(bp) => {
write_point(&bp.A.0, w)?;
write_point(&bp.wip.A.0, w)?;
write_point(&bp.wip.B.0, w)?;
write_scalar(&bp.wip.r_answer.0, w)?;
write_scalar(&bp.wip.s_answer.0, w)?;
write_scalar(&bp.wip.delta_answer.0, w)?;
specific_write_vec(&bp.wip.L.iter().copied().map(|L| L.0).collect::<Vec<_>>(), w)?;
specific_write_vec(&bp.wip.R.iter().copied().map(|R| R.0).collect::<Vec<_>>(), w)
}
}
}
pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
}
/// Write the Bulletproof(+) to a writer.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_vec(write_point, points, w))
}
/// Serialize the Bulletproof(+) to a `Vec<u8>`.
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
self.write(&mut serialized).unwrap();
serialized
}
/// Read a Bulletproof.
pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
Ok(Bulletproof::Original(OriginalStruct {
A: read_point(r)?,
S: read_point(r)?,
T1: read_point(r)?,
T2: read_point(r)?,
taux: read_scalar(r)?,
mu: read_scalar(r)?,
L: read_vec(read_point, r)?,
R: read_vec(read_point, r)?,
a: read_scalar(r)?,
b: read_scalar(r)?,
t: read_scalar(r)?,
}))
}
/// Read a Bulletproof+.
pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
use dalek_ff_group::{Scalar as DfgScalar, EdwardsPoint as DfgPoint};
Ok(Bulletproof::Plus(AggregateRangeProof {
A: DfgPoint(read_point(r)?),
wip: WipProof {
A: DfgPoint(read_point(r)?),
B: DfgPoint(read_point(r)?),
r_answer: DfgScalar(read_scalar(r)?),
s_answer: DfgScalar(read_scalar(r)?),
delta_answer: DfgScalar(read_scalar(r)?),
L: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
R: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
},
}))
}
}

View File

@@ -1,323 +0,0 @@
use std_shims::{vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as DalekPoint};
use group::{ff::Field, Group};
use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};
use multiexp::{BatchVerifier, multiexp};
use crate::{Commitment, ringct::bulletproofs::core::*};
include!(concat!(env!("OUT_DIR"), "/generators.rs"));
static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
*IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}
pub(crate) fn hadamard_fold(
l: &[EdwardsPoint],
r: &[EdwardsPoint],
a: Scalar,
b: Scalar,
) -> Vec<EdwardsPoint> {
let mut res = Vec::with_capacity(l.len() / 2);
for i in 0 .. l.len() {
res.push(multiexp(&[(a, l[i]), (b, r[i])]));
}
res
}
/// Internal structure representing a Bulletproof.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
pub(crate) A: DalekPoint,
pub(crate) S: DalekPoint,
pub(crate) T1: DalekPoint,
pub(crate) T2: DalekPoint,
pub(crate) taux: DalekScalar,
pub(crate) mu: DalekScalar,
pub(crate) L: Vec<DalekPoint>,
pub(crate) R: Vec<DalekPoint>,
pub(crate) a: DalekScalar,
pub(crate) b: DalekScalar,
pub(crate) t: DalekScalar,
}
impl OriginalStruct {
pub(crate) fn prove<R: RngCore + CryptoRng>(
rng: &mut R,
commitments: &[Commitment],
) -> OriginalStruct {
let (logMN, M, MN) = MN(commitments.len());
let (aL, aR) = bit_decompose(commitments);
let commitments_points = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
let (mut cache, _) = hash_commitments(commitments_points.clone());
let (sL, sR) =
ScalarVector((0 .. (MN * 2)).map(|_| Scalar::random(&mut *rng)).collect::<Vec<_>>()).split();
let generators = GENERATORS();
let (mut alpha, A) = alpha_rho(&mut *rng, generators, &aL, &aR);
let (mut rho, S) = alpha_rho(&mut *rng, generators, &sL, &sR);
let y = hash_cache(&mut cache, &[A.compress().to_bytes(), S.compress().to_bytes()]);
let mut cache = hash_to_scalar(&y.to_bytes());
let z = cache;
let l0 = aL - z;
let l1 = sL;
let mut zero_twos = Vec::with_capacity(MN);
let zpow = ScalarVector::powers(z, M + 2);
for j in 0 .. M {
for i in 0 .. N {
zero_twos.push(zpow[j + 2] * TWO_N()[i]);
}
}
let yMN = ScalarVector::powers(y, MN);
let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
let r1 = yMN * &sR;
let (T1, T2, x, mut taux) = {
let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
let t2 = l1.clone().inner_product(&r1);
let mut tau1 = Scalar::random(&mut *rng);
let mut tau2 = Scalar::random(&mut *rng);
let T1 = prove_multiexp(&[(t1, H()), (tau1, EdwardsPoint::generator())]);
let T2 = prove_multiexp(&[(t2, H()), (tau2, EdwardsPoint::generator())]);
let x =
hash_cache(&mut cache, &[z.to_bytes(), T1.compress().to_bytes(), T2.compress().to_bytes()]);
let taux = (tau2 * (x * x)) + (tau1 * x);
tau1.zeroize();
tau2.zeroize();
(T1, T2, x, taux)
};
let mu = (x * rho) + alpha;
alpha.zeroize();
rho.zeroize();
for (i, gamma) in commitments.iter().map(|c| Scalar(c.mask)).enumerate() {
taux += zpow[i + 2] * gamma;
}
let l = l0 + &(l1 * x);
let r = r0 + &(r1 * x);
let t = l.clone().inner_product(&r);
let x_ip =
hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
let mut a = l;
let mut b = r;
let yinv = y.invert().unwrap();
let yinvpow = ScalarVector::powers(yinv, MN);
let mut G_proof = generators.G[.. a.len()].to_vec();
let mut H_proof = generators.H[.. a.len()].to_vec();
H_proof.iter_mut().zip(yinvpow.0.iter()).for_each(|(this_H, yinvpow)| *this_H *= yinvpow);
let U = H() * x_ip;
let mut L = Vec::with_capacity(logMN);
let mut R = Vec::with_capacity(logMN);
while a.len() != 1 {
let (aL, aR) = a.split();
let (bL, bR) = b.split();
let cL = aL.clone().inner_product(&bR);
let cR = aR.clone().inner_product(&bL);
let (G_L, G_R) = G_proof.split_at(aL.len());
let (H_L, H_R) = H_proof.split_at(aL.len());
let L_i = prove_multiexp(&LR_statements(&aL, G_R, &bR, H_L, cL, U));
let R_i = prove_multiexp(&LR_statements(&aR, G_L, &bL, H_R, cR, U));
L.push(L_i);
R.push(R_i);
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
let winv = w.invert().unwrap();
a = (aL * w) + &(aR * winv);
b = (bL * winv) + &(bR * w);
if a.len() != 1 {
G_proof = hadamard_fold(G_L, G_R, winv, w);
H_proof = hadamard_fold(H_L, H_R, w, winv);
}
}
let res = OriginalStruct {
A: *A,
S: *S,
T1: *T1,
T2: *T2,
taux: *taux,
mu: *mu,
L: L.drain(..).map(|L| *L).collect(),
R: R.drain(..).map(|R| *R).collect(),
a: *a[0],
b: *b[0],
t: *t,
};
debug_assert!(res.verify(rng, &commitments_points));
res
}
#[must_use]
fn verify_core<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, EdwardsPoint>,
id: ID,
commitments: &[DalekPoint],
) -> bool {
// Verify commitments are valid
if commitments.is_empty() || (commitments.len() > MAX_M) {
return false;
}
// Verify L and R are properly sized
if self.L.len() != self.R.len() {
return false;
}
let (logMN, M, MN) = MN(commitments.len());
if self.L.len() != logMN {
return false;
}
// Rebuild all challenges
let (mut cache, commitments) = hash_commitments(commitments.iter().copied());
let y = hash_cache(&mut cache, &[self.A.compress().to_bytes(), self.S.compress().to_bytes()]);
let z = hash_to_scalar(&y.to_bytes());
cache = z;
let x = hash_cache(
&mut cache,
&[z.to_bytes(), self.T1.compress().to_bytes(), self.T2.compress().to_bytes()],
);
let x_ip = hash_cache(
&mut cache,
&[x.to_bytes(), self.taux.to_bytes(), self.mu.to_bytes(), self.t.to_bytes()],
);
let mut w = Vec::with_capacity(logMN);
let mut winv = Vec::with_capacity(logMN);
for (L, R) in self.L.iter().zip(&self.R) {
w.push(hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]));
winv.push(cache.invert().unwrap());
}
// Convert the proof from * INV_EIGHT to its actual form
let normalize = |point: &DalekPoint| EdwardsPoint(point.mul_by_cofactor());
let L = self.L.iter().map(normalize).collect::<Vec<_>>();
let R = self.R.iter().map(normalize).collect::<Vec<_>>();
let T1 = normalize(&self.T1);
let T2 = normalize(&self.T2);
let A = normalize(&self.A);
let S = normalize(&self.S);
let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
// Verify it
let mut proof = Vec::with_capacity(4 + commitments.len());
let zpow = ScalarVector::powers(z, M + 3);
let ip1y = ScalarVector::powers(y, M * N).sum();
let mut k = -(zpow[2] * ip1y);
for j in 1 ..= M {
k -= zpow[j + 2] * IP12();
}
let y1 = Scalar(self.t) - ((z * ip1y) + k);
proof.push((-y1, H()));
proof.push((-Scalar(self.taux), G));
for (j, commitment) in commitments.iter().enumerate() {
proof.push((zpow[j + 2], *commitment));
}
proof.push((x, T1));
proof.push((x * x, T2));
verifier.queue(&mut *rng, id, proof);
proof = Vec::with_capacity(4 + (2 * (MN + logMN)));
let z3 = (Scalar(self.t) - (Scalar(self.a) * Scalar(self.b))) * x_ip;
proof.push((z3, H()));
proof.push((-Scalar(self.mu), G));
proof.push((Scalar::ONE, A));
proof.push((x, S));
{
let ypow = ScalarVector::powers(y, MN);
let yinv = y.invert().unwrap();
let yinvpow = ScalarVector::powers(yinv, MN);
let w_cache = challenge_products(&w, &winv);
let generators = GENERATORS();
for i in 0 .. MN {
let g = (Scalar(self.a) * w_cache[i]) + z;
proof.push((-g, generators.G[i]));
let mut h = Scalar(self.b) * yinvpow[i] * w_cache[(!i) & (MN - 1)];
h -= ((zpow[(i / N) + 2] * TWO_N()[i % N]) + (z * ypow[i])) * yinvpow[i];
proof.push((-h, generators.H[i]));
}
}
for i in 0 .. logMN {
proof.push((w[i] * w[i], L[i]));
proof.push((winv[i] * winv[i], R[i]));
}
verifier.queue(rng, id, proof);
true
}
#[must_use]
pub(crate) fn verify<R: RngCore + CryptoRng>(
&self,
rng: &mut R,
commitments: &[DalekPoint],
) -> bool {
let mut verifier = BatchVerifier::new(1);
if self.verify_core(rng, &mut verifier, (), commitments) {
verifier.verify_vartime()
} else {
false
}
}
#[must_use]
pub(crate) fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, EdwardsPoint>,
id: ID,
commitments: &[DalekPoint],
) -> bool {
self.verify_core(rng, verifier, id, commitments)
}
}

View File

@@ -1,259 +0,0 @@
use std_shims::vec::Vec;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
use group::{
ff::{Field, PrimeField},
Group, GroupEncoding,
};
use curve25519_dalek::EdwardsPoint as DalekPoint;
use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::{
Commitment,
ringct::{
bulletproofs::core::{MAX_M, N},
bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators,
transcript::*,
weighted_inner_product::{WipStatement, WipWitness, WipProof},
padded_pow_of_2, u64_decompose,
},
},
};
// Figure 3 of the Bulletproofs+ Paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement {
generators: Generators,
V: Vec<DalekPoint>,
}
impl Zeroize for AggregateRangeStatement {
fn zeroize(&mut self) {
self.V.zeroize();
}
}
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
impl AggregateRangeWitness {
pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
if commitments.is_empty() || (commitments.len() > MAX_M) {
return None;
}
Some(AggregateRangeWitness(commitments))
}
}
/// Internal structure representing a Bulletproof+, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
pub(crate) A: EdwardsPoint,
pub(crate) wip: WipProof,
}
impl AggregateRangeStatement {
pub(crate) fn new(V: Vec<DalekPoint>) -> Option<Self> {
if V.is_empty() || (V.len() > MAX_M) {
return None;
}
Some(Self { generators: Generators::new(), V })
}
fn transcript_A(transcript: &mut Scalar, A: EdwardsPoint) -> (Scalar, Scalar) {
let y = hash_to_scalar(&[transcript.to_repr().as_ref(), A.to_bytes().as_ref()].concat());
let z = hash_to_scalar(y.to_bytes().as_ref());
*transcript = z;
(y, z)
}
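// d_j(j, m) is the (m * N)-length vector which is zero except in block j (1-indexed), where it
// holds the powers of two: 1, 2, 4, ..., 2^(N - 1)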
fn d_j(j: usize, m: usize) -> ScalarVector {
let mut d_j = Vec::with_capacity(m * N);
for _ in 0 .. (j - 1) * N {
d_j.push(Scalar::ZERO);
}
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), N).0);
for _ in 0 .. (m - j) * N {
d_j.push(Scalar::ZERO);
}
ScalarVector(d_j)
}
fn compute_A_hat(
mut V: PointVector,
generators: &Generators,
transcript: &mut Scalar,
mut A: EdwardsPoint,
) -> (Scalar, ScalarVector, Scalar, Scalar, ScalarVector, EdwardsPoint) {
let (y, z) = Self::transcript_A(transcript, A);
A = A.mul_by_cofactor();
while V.len() < padded_pow_of_2(V.len()) {
V.0.push(EdwardsPoint::identity());
}
let mn = V.len() * N;
// Powers of z (z**2, z**4, z**6...), with as many entries as there are commitments
let mut z_pow = Vec::with_capacity(V.len());
// z**2
z_pow.push(z * z);
let mut d = ScalarVector::new(mn);
for j in 1 ..= V.len() {
z_pow.push(*z_pow.last().unwrap() * z_pow[0]);
d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
}
let mut ascending_y = ScalarVector(vec![y]);
for i in 1 .. d.len() {
ascending_y.0.push(ascending_y[i - 1] * y);
}
let y_pows = ascending_y.clone().sum();
let mut descending_y = ascending_y.clone();
descending_y.0.reverse();
let d_descending_y = d.clone() * &descending_y;
let d_descending_y_plus_z = d_descending_y + z;
let y_mn_plus_one = descending_y[0] * y;
let mut commitment_accum = EdwardsPoint::identity();
for (j, commitment) in V.0.iter().enumerate() {
commitment_accum += *commitment * z_pow[j];
}
let neg_z = -z;
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
}
A_terms.push((y_mn_plus_one, commitment_accum));
A_terms.push((
((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * z.square())),
Generators::g(),
));
(
y,
d_descending_y_plus_z,
y_mn_plus_one,
z,
ScalarVector(z_pow),
A + multiexp_vartime(&A_terms),
)
}
pub(crate) fn prove<R: RngCore + CryptoRng>(
self,
rng: &mut R,
witness: &AggregateRangeWitness,
) -> Option<AggregateRangeProof> {
// Check for consistency with the witness
if self.V.len() != witness.0.len() {
return None;
}
for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
if witness.calculate() != *commitment {
return None;
}
}
let Self { generators, V } = self;
// Monero expects all of these points to be torsion-free
// Generally, for Bulletproofs, it sends points * INV_EIGHT and then performs a torsion clear
// by multiplying by 8
// This also restores the original value due to the preprocessing
// Commitments aren't transmitted * INV_EIGHT though, so this multiplies by INV_EIGHT to enable
// clearing its cofactor without mutating the value
// For some reason, these values are transcripted * INV_EIGHT, not as transmitted
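// e.g. for a torsion-free commitment C, (C * INV_EIGHT).mul_by_cofactor() == C, since
// mul_by_cofactor multiplies by 8 and (8 * INV_EIGHT) == 1 mod l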
let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
let mut transcript = initial_transcript(V.iter());
let mut V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();
// Pad V
while V.len() < padded_pow_of_2(V.len()) {
V.push(EdwardsPoint::identity());
}
let generators = generators.reduce(V.len() * N);
let mut d_js = Vec::with_capacity(V.len());
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
for j in 1 ..= V.len() {
d_js.push(Self::d_j(j, V.len()));
#[allow(clippy::map_unwrap_or)]
a_l.0.append(
&mut u64_decompose(
*witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
)
.0,
);
}
let a_r = a_l.clone() - Scalar::ONE;
let alpha = Scalar::random(&mut *rng);
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 1);
for (i, a_l) in a_l.0.iter().enumerate() {
A_terms.push((*a_l, generators.generator(GeneratorsList::GBold1, i)));
}
for (i, a_r) in a_r.0.iter().enumerate() {
A_terms.push((*a_r, generators.generator(GeneratorsList::HBold1, i)));
}
A_terms.push((alpha, Generators::h()));
let mut A = multiexp(&A_terms);
A_terms.zeroize();
// Multiply by INV_EIGHT per earlier commentary
A.0 *= crate::INV_EIGHT();
let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);
let a_l = a_l - z;
let a_r = a_r + &d_descending_y_plus_z;
let mut alpha = alpha;
for j in 1 ..= witness.0.len() {
alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
}
Some(AggregateRangeProof {
A,
wip: WipStatement::new(generators, A_hat, y)
.prove(rng, transcript, &Zeroizing::new(WipWitness::new(a_l, a_r, alpha).unwrap()))
.unwrap(),
})
}
pub(crate) fn verify<Id: Copy + Zeroize, R: RngCore + CryptoRng>(
self,
rng: &mut R,
verifier: &mut BatchVerifier<Id, EdwardsPoint>,
id: Id,
proof: AggregateRangeProof,
) -> bool {
let Self { generators, V } = self;
let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
let mut transcript = initial_transcript(V.iter());
// With the torsion cleared, wrap it in an EdwardsPoint from dalek-ff-group
// (which is prime-order)
let V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();
let generators = generators.reduce(V.len() * N);
let (y, _, _, _, _, A_hat) =
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, proof.A);
WipStatement::new(generators, A_hat, y).verify(rng, verifier, id, transcript, proof.wip)
}
}

View File

@@ -1,85 +0,0 @@
#![allow(non_snake_case)]
use group::Group;
use dalek_ff_group::{Scalar, EdwardsPoint};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
mod point_vector;
pub(crate) use point_vector::PointVector;
pub(crate) mod transcript;
pub(crate) mod weighted_inner_product;
pub(crate) use weighted_inner_product::*;
pub(crate) mod aggregate_range_proof;
pub(crate) use aggregate_range_proof::*;
pub(crate) fn padded_pow_of_2(i: usize) -> usize {
let mut next_pow_of_2 = 1;
while next_pow_of_2 < i {
next_pow_of_2 <<= 1;
}
next_pow_of_2
}
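// e.g. padded_pow_of_2(5) == 8, padded_pow_of_2(8) == 8, and padded_pow_of_2(1) == 1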
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub(crate) enum GeneratorsList {
GBold1,
HBold1,
}
// TODO: Table these
#[derive(Clone, Debug)]
pub(crate) struct Generators {
g_bold1: &'static [EdwardsPoint],
h_bold1: &'static [EdwardsPoint],
}
mod generators {
use std_shims::sync::OnceLock;
use monero_generators::Generators;
include!(concat!(env!("OUT_DIR"), "/generators_plus.rs"));
}
impl Generators {
#[allow(clippy::new_without_default)]
pub(crate) fn new() -> Self {
let gens = generators::GENERATORS();
Generators { g_bold1: &gens.G, h_bold1: &gens.H }
}
pub(crate) fn len(&self) -> usize {
self.g_bold1.len()
}
pub(crate) fn g() -> EdwardsPoint {
dalek_ff_group::EdwardsPoint(crate::H())
}
pub(crate) fn h() -> EdwardsPoint {
EdwardsPoint::generator()
}
pub(crate) fn generator(&self, list: GeneratorsList, i: usize) -> EdwardsPoint {
match list {
GeneratorsList::GBold1 => self.g_bold1[i],
GeneratorsList::HBold1 => self.h_bold1[i],
}
}
pub(crate) fn reduce(&self, generators: usize) -> Self {
// Round up to the next power of 2
let generators = padded_pow_of_2(generators);
assert!(generators <= self.g_bold1.len());
Generators { g_bold1: &self.g_bold1[.. generators], h_bold1: &self.h_bold1[.. generators] }
}
}
// Returns the little-endian decomposition.
fn u64_decompose(value: u64) -> ScalarVector {
let mut bits = ScalarVector::new(64);
for bit in 0 .. 64 {
bits[bit] = Scalar::from((value >> bit) & 1);
}
bits
}

View File

@@ -1,50 +0,0 @@
use core::ops::{Index, IndexMut};
use std_shims::vec::Vec;
use zeroize::{Zeroize, ZeroizeOnDrop};
use dalek_ff_group::EdwardsPoint;
#[cfg(test)]
use multiexp::multiexp;
#[cfg(test)]
use crate::ringct::bulletproofs::plus::ScalarVector;
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct PointVector(pub(crate) Vec<EdwardsPoint>);
impl Index<usize> for PointVector {
type Output = EdwardsPoint;
fn index(&self, index: usize) -> &EdwardsPoint {
&self.0[index]
}
}
impl IndexMut<usize> for PointVector {
fn index_mut(&mut self, index: usize) -> &mut EdwardsPoint {
&mut self.0[index]
}
}
impl PointVector {
#[cfg(test)]
pub(crate) fn multiexp(&self, vector: &ScalarVector) -> EdwardsPoint {
debug_assert_eq!(self.len(), vector.len());
let mut res = Vec::with_capacity(self.len());
for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) {
res.push((scalar, point));
}
multiexp(&res)
}
pub(crate) fn len(&self) -> usize {
self.0.len()
}
pub(crate) fn split(mut self) -> (Self, Self) {
debug_assert!(self.len() > 1);
let r = self.0.split_off(self.0.len() / 2);
debug_assert_eq!(self.len(), r.len());
(self, PointVector(r))
}
}

View File

@@ -1,25 +0,0 @@
use std_shims::{sync::OnceLock, vec::Vec};
use curve25519_dalek::EdwardsPoint;
use dalek_ff_group::Scalar;
use monero_generators::{hash_to_point as raw_hash_to_point};
use crate::{hash, hash_to_scalar as dalek_hash};
// Monero starts BP+ transcripts with the following constant.
static TRANSCRIPT_CELL: OnceLock<[u8; 32]> = OnceLock::new();
pub(crate) fn TRANSCRIPT() -> [u8; 32] {
// Why this uses a hash_to_point is completely unknown.
*TRANSCRIPT_CELL
.get_or_init(|| raw_hash_to_point(hash(b"bulletproof_plus_transcript")).compress().to_bytes())
}
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar(dalek_hash(data))
}
pub(crate) fn initial_transcript(commitments: core::slice::Iter<'_, EdwardsPoint>) -> Scalar {
let commitments_hash =
hash_to_scalar(&commitments.flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>());
hash_to_scalar(&[TRANSCRIPT().as_ref(), &commitments_hash.to_bytes()].concat())
}

View File

@@ -1,444 +0,0 @@
use std_shims::vec::Vec;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
};
// Figure 1 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct WipStatement {
generators: Generators,
P: EdwardsPoint,
y: ScalarVector,
}
impl Zeroize for WipStatement {
fn zeroize(&mut self) {
self.P.zeroize();
self.y.zeroize();
}
}
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct WipWitness {
a: ScalarVector,
b: ScalarVector,
alpha: Scalar,
}
impl WipWitness {
pub(crate) fn new(mut a: ScalarVector, mut b: ScalarVector, alpha: Scalar) -> Option<Self> {
if a.0.is_empty() || (a.len() != b.len()) {
return None;
}
// Pad to the nearest power of 2
let missing = padded_pow_of_2(a.len()) - a.len();
a.0.reserve(missing);
b.0.reserve(missing);
for _ in 0 .. missing {
a.0.push(Scalar::ZERO);
b.0.push(Scalar::ZERO);
}
Some(Self { a, b, alpha })
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) struct WipProof {
pub(crate) L: Vec<EdwardsPoint>,
pub(crate) R: Vec<EdwardsPoint>,
pub(crate) A: EdwardsPoint,
pub(crate) B: EdwardsPoint,
pub(crate) r_answer: Scalar,
pub(crate) s_answer: Scalar,
pub(crate) delta_answer: Scalar,
}
impl WipStatement {
pub(crate) fn new(generators: Generators, P: EdwardsPoint, y: Scalar) -> Self {
debug_assert_eq!(generators.len(), padded_pow_of_2(generators.len()));
// y ** n
let mut y_vec = ScalarVector::new(generators.len());
y_vec[0] = y;
for i in 1 .. y_vec.len() {
y_vec[i] = y_vec[i - 1] * y;
}
Self { generators, P, y: y_vec }
}
fn transcript_L_R(transcript: &mut Scalar, L: EdwardsPoint, R: EdwardsPoint) -> Scalar {
let e = hash_to_scalar(
&[transcript.to_repr().as_ref(), L.to_bytes().as_ref(), R.to_bytes().as_ref()].concat(),
);
*transcript = e;
e
}
fn transcript_A_B(transcript: &mut Scalar, A: EdwardsPoint, B: EdwardsPoint) -> Scalar {
let e = hash_to_scalar(
&[transcript.to_repr().as_ref(), A.to_bytes().as_ref(), B.to_bytes().as_ref()].concat(),
);
*transcript = e;
e
}
// Prover's variant of the shared code block to calculate G/H/P when n > 1
// Returns each permutation of G/H, since the prover needs to operate on each permutation
// P is dropped as it's unused in the prover's path
// TODO: It'd still probably be faster to keep in terms of the original generators, both between
// the reduced amount of group operations and the potential tabling of the generators under
// multiexp
#[allow(clippy::too_many_arguments)]
fn next_G_H(
transcript: &mut Scalar,
mut g_bold1: PointVector,
mut g_bold2: PointVector,
mut h_bold1: PointVector,
mut h_bold2: PointVector,
L: EdwardsPoint,
R: EdwardsPoint,
y_inv_n_hat: Scalar,
) -> (Scalar, Scalar, Scalar, Scalar, PointVector, PointVector) {
debug_assert_eq!(g_bold1.len(), g_bold2.len());
debug_assert_eq!(h_bold1.len(), h_bold2.len());
debug_assert_eq!(g_bold1.len(), h_bold1.len());
let e = Self::transcript_L_R(transcript, L, R);
let inv_e = e.invert().unwrap();
// This vartime is safe as all of these arguments are public
let mut new_g_bold = Vec::with_capacity(g_bold1.len());
let e_y_inv = e * y_inv_n_hat;
for g_bold in g_bold1.0.drain(..).zip(g_bold2.0.drain(..)) {
new_g_bold.push(multiexp_vartime(&[(inv_e, g_bold.0), (e_y_inv, g_bold.1)]));
}
let mut new_h_bold = Vec::with_capacity(h_bold1.len());
for h_bold in h_bold1.0.drain(..).zip(h_bold2.0.drain(..)) {
new_h_bold.push(multiexp_vartime(&[(e, h_bold.0), (inv_e, h_bold.1)]));
}
let e_square = e.square();
let inv_e_square = inv_e.square();
(e, inv_e, e_square, inv_e_square, PointVector(new_g_bold), PointVector(new_h_bold))
}
/*
This has room for optimization worth investigating further. It currently takes
an iterative approach. It can be optimized further via divide and conquer.
Assume there are 4 challenges.
Iterative approach (current):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across that result and column 2.
3. Do the optimal multiplications across that result and column 3.
Divide and conquer (worth investigating further):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across challenge column 2 and 3.
3. Multiply both results together.
When there are 4 challenges (n=16), the iterative approach does 28 multiplications (4 + 8 + 16)
versus divide and conquer's 24 (4 + 4 + 16).
*/
fn challenge_products(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
let mut products = vec![Scalar::ONE; 1 << challenges.len()];
if !challenges.is_empty() {
products[0] = challenges[0].1;
products[1] = challenges[0].0;
for (j, challenge) in challenges.iter().enumerate().skip(1) {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * challenge.0;
products[slots - 1] = products[slots / 2] * challenge.1;
slots = slots.saturating_sub(2);
}
}
// Sanity check, since it'd be critical if the above failed to populate
for product in &products {
debug_assert!(!bool::from(product.is_zero()));
}
}
products
}
pub(crate) fn prove<R: RngCore + CryptoRng>(
self,
rng: &mut R,
mut transcript: Scalar,
witness: &WipWitness,
) -> Option<WipProof> {
let WipStatement { generators, P, mut y } = self;
#[cfg(not(debug_assertions))]
let _ = P;
if generators.len() != witness.a.len() {
return None;
}
let (g, h) = (Generators::g(), Generators::h());
let mut g_bold = vec![];
let mut h_bold = vec![];
for i in 0 .. generators.len() {
g_bold.push(generators.generator(GeneratorsList::GBold1, i));
h_bold.push(generators.generator(GeneratorsList::HBold1, i));
}
let mut g_bold = PointVector(g_bold);
let mut h_bold = PointVector(h_bold);
// Check P has the expected relationship
#[cfg(debug_assertions)]
{
let mut P_terms = witness
.a
.0
.iter()
.copied()
.zip(g_bold.0.iter().copied())
.chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
.collect::<Vec<_>>();
P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
P_terms.push((witness.alpha, h));
debug_assert_eq!(multiexp(&P_terms), P);
P_terms.zeroize();
}
let mut a = witness.a.clone();
let mut b = witness.b.clone();
let mut alpha = witness.alpha;
// From here on, g_bold.len() is used as n
debug_assert_eq!(g_bold.len(), a.len());
let mut L_vec = vec![];
let mut R_vec = vec![];
// else n > 1 case from figure 1
while g_bold.len() > 1 {
let (a1, a2) = a.clone().split();
let (b1, b2) = b.clone().split();
let (g_bold1, g_bold2) = g_bold.split();
let (h_bold1, h_bold2) = h_bold.split();
let n_hat = g_bold1.len();
debug_assert_eq!(a1.len(), n_hat);
debug_assert_eq!(a2.len(), n_hat);
debug_assert_eq!(b1.len(), n_hat);
debug_assert_eq!(b2.len(), n_hat);
debug_assert_eq!(g_bold1.len(), n_hat);
debug_assert_eq!(g_bold2.len(), n_hat);
debug_assert_eq!(h_bold1.len(), n_hat);
debug_assert_eq!(h_bold2.len(), n_hat);
let y_n_hat = y[n_hat - 1];
y.0.truncate(n_hat);
let d_l = Scalar::random(&mut *rng);
let d_r = Scalar::random(&mut *rng);
let c_l = a1.clone().weighted_inner_product(&b2, &y);
let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
// TODO: Calculate these with a batch inversion
let y_inv_n_hat = y_n_hat.invert().unwrap();
let mut L_terms = (a1.clone() * y_inv_n_hat)
.0
.drain(..)
.zip(g_bold2.0.iter().copied())
.chain(b2.0.iter().copied().zip(h_bold1.0.iter().copied()))
.collect::<Vec<_>>();
L_terms.push((c_l, g));
L_terms.push((d_l, h));
let L = multiexp(&L_terms) * Scalar(crate::INV_EIGHT());
L_vec.push(L);
L_terms.zeroize();
let mut R_terms = (a2.clone() * y_n_hat)
.0
.drain(..)
.zip(g_bold1.0.iter().copied())
.chain(b1.0.iter().copied().zip(h_bold2.0.iter().copied()))
.collect::<Vec<_>>();
R_terms.push((c_r, g));
R_terms.push((d_r, h));
let R = multiexp(&R_terms) * Scalar(crate::INV_EIGHT());
R_vec.push(R);
R_terms.zeroize();
let (e, inv_e, e_square, inv_e_square);
(e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);
a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
b = (b1 * inv_e) + &(b2 * e);
alpha += (d_l * e_square) + (d_r * inv_e_square);
debug_assert_eq!(g_bold.len(), a.len());
debug_assert_eq!(g_bold.len(), h_bold.len());
debug_assert_eq!(g_bold.len(), b.len());
}
// n == 1 case from figure 1
debug_assert_eq!(g_bold.len(), 1);
debug_assert_eq!(h_bold.len(), 1);
debug_assert_eq!(a.len(), 1);
debug_assert_eq!(b.len(), 1);
let r = Scalar::random(&mut *rng);
let s = Scalar::random(&mut *rng);
let delta = Scalar::random(&mut *rng);
let eta = Scalar::random(&mut *rng);
let ry = r * y[0];
let mut A_terms =
vec![(r, g_bold[0]), (s, h_bold[0]), ((ry * b[0]) + (s * y[0] * a[0]), g), (delta, h)];
let A = multiexp(&A_terms) * Scalar(crate::INV_EIGHT());
A_terms.zeroize();
let mut B_terms = vec![(ry * s, g), (eta, h)];
let B = multiexp(&B_terms) * Scalar(crate::INV_EIGHT());
B_terms.zeroize();
let e = Self::transcript_A_B(&mut transcript, A, B);
let r_answer = r + (a[0] * e);
let s_answer = s + (b[0] * e);
let delta_answer = eta + (delta * e) + (alpha * e.square());
Some(WipProof { L: L_vec, R: R_vec, A, B, r_answer, s_answer, delta_answer })
}
pub(crate) fn verify<Id: Copy + Zeroize, R: RngCore + CryptoRng>(
self,
rng: &mut R,
verifier: &mut BatchVerifier<Id, EdwardsPoint>,
id: Id,
mut transcript: Scalar,
mut proof: WipProof,
) -> bool {
let WipStatement { generators, P, y } = self;
let (g, h) = (Generators::g(), Generators::h());
// Verify the L/R lengths
{
let mut lr_len = 0;
while (1 << lr_len) < generators.len() {
lr_len += 1;
}
if (proof.L.len() != lr_len) ||
(proof.R.len() != lr_len) ||
(generators.len() != (1 << lr_len))
{
return false;
}
}
let inv_y = {
let inv_y = y[0].invert().unwrap();
let mut res = Vec::with_capacity(y.len());
res.push(inv_y);
while res.len() < y.len() {
res.push(inv_y * res.last().unwrap());
}
res
};
let mut P_terms = vec![(Scalar::ONE, P)];
P_terms.reserve(6 + (2 * generators.len()) + proof.L.len());
let mut challenges = Vec::with_capacity(proof.L.len());
let product_cache = {
let mut es = Vec::with_capacity(proof.L.len());
for (L, R) in proof.L.iter_mut().zip(proof.R.iter_mut()) {
es.push(Self::transcript_L_R(&mut transcript, *L, *R));
*L = L.mul_by_cofactor();
*R = R.mul_by_cofactor();
}
let mut inv_es = es.clone();
let mut scratch = vec![Scalar::ZERO; es.len()];
group::ff::BatchInverter::invert_with_external_scratch(&mut inv_es, &mut scratch);
drop(scratch);
debug_assert_eq!(es.len(), inv_es.len());
debug_assert_eq!(es.len(), proof.L.len());
debug_assert_eq!(es.len(), proof.R.len());
for ((e, inv_e), (L, R)) in
es.drain(..).zip(inv_es.drain(..)).zip(proof.L.iter().zip(proof.R.iter()))
{
debug_assert_eq!(e.invert().unwrap(), inv_e);
challenges.push((e, inv_e));
let e_square = e.square();
let inv_e_square = inv_e.square();
P_terms.push((e_square, *L));
P_terms.push((inv_e_square, *R));
}
Self::challenge_products(&challenges)
};
let e = Self::transcript_A_B(&mut transcript, proof.A, proof.B);
proof.A = proof.A.mul_by_cofactor();
proof.B = proof.B.mul_by_cofactor();
let neg_e_square = -e.square();
let mut multiexp = P_terms;
multiexp.reserve(4 + (2 * generators.len()));
for (scalar, _) in &mut multiexp {
*scalar *= neg_e_square;
}
let re = proof.r_answer * e;
for i in 0 .. generators.len() {
let mut scalar = product_cache[i] * re;
if i > 0 {
scalar *= inv_y[i - 1];
}
multiexp.push((scalar, generators.generator(GeneratorsList::GBold1, i)));
}
let se = proof.s_answer * e;
for i in 0 .. generators.len() {
multiexp.push((
se * product_cache[product_cache.len() - 1 - i],
generators.generator(GeneratorsList::HBold1, i),
));
}
multiexp.push((-e, proof.A));
multiexp.push((proof.r_answer * y[0] * proof.s_answer, g));
multiexp.push((proof.delta_answer, h));
multiexp.push((-Scalar::ONE, proof.B));
verifier.queue(rng, id, multiexp);
true
}
}

View File

@@ -1,138 +0,0 @@
use core::{
borrow::Borrow,
ops::{Index, IndexMut, Add, Sub, Mul},
};
use std_shims::vec::Vec;
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};
use multiexp::multiexp;
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
impl Index<usize> for ScalarVector {
type Output = Scalar;
fn index(&self, index: usize) -> &Scalar {
&self.0[index]
}
}
impl IndexMut<usize> for ScalarVector {
fn index_mut(&mut self, index: usize) -> &mut Scalar {
&mut self.0[index]
}
}
impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
type Output = ScalarVector;
fn add(mut self, scalar: S) -> ScalarVector {
for s in &mut self.0 {
*s += scalar.borrow();
}
self
}
}
impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
type Output = ScalarVector;
fn sub(mut self, scalar: S) -> ScalarVector {
for s in &mut self.0 {
*s -= scalar.borrow();
}
self
}
}
impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
type Output = ScalarVector;
fn mul(mut self, scalar: S) -> ScalarVector {
for s in &mut self.0 {
*s *= scalar.borrow();
}
self
}
}
impl Add<&ScalarVector> for ScalarVector {
type Output = ScalarVector;
fn add(mut self, other: &ScalarVector) -> ScalarVector {
debug_assert_eq!(self.len(), other.len());
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
self
}
}
impl Sub<&ScalarVector> for ScalarVector {
type Output = ScalarVector;
fn sub(mut self, other: &ScalarVector) -> ScalarVector {
debug_assert_eq!(self.len(), other.len());
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s -= o;
}
self
}
}
impl Mul<&ScalarVector> for ScalarVector {
type Output = ScalarVector;
fn mul(mut self, other: &ScalarVector) -> ScalarVector {
debug_assert_eq!(self.len(), other.len());
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s *= o;
}
self
}
}
impl Mul<&[EdwardsPoint]> for &ScalarVector {
type Output = EdwardsPoint;
fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
debug_assert_eq!(self.len(), b.len());
let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
let res = multiexp(&multiexp_args);
multiexp_args.zeroize();
res
}
}
impl ScalarVector {
pub(crate) fn new(len: usize) -> Self {
ScalarVector(vec![Scalar::ZERO; len])
}
pub(crate) fn powers(x: Scalar, len: usize) -> Self {
debug_assert!(len != 0);
let mut res = Vec::with_capacity(len);
res.push(Scalar::ONE);
res.push(x);
for i in 2 .. len {
res.push(res[i - 1] * x);
}
res.truncate(len);
ScalarVector(res)
}
pub(crate) fn len(&self) -> usize {
self.0.len()
}
pub(crate) fn sum(mut self) -> Scalar {
self.0.drain(..).sum()
}
pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
(self * vector).sum()
}
pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
(self * vector * y).sum()
}
pub(crate) fn split(mut self) -> (Self, Self) {
debug_assert!(self.len() > 1);
let r = self.0.split_off(self.0.len() / 2);
debug_assert_eq!(self.len(), r.len());
(self, ScalarVector(r))
}
}

View File

@@ -18,7 +18,7 @@ pub use monero_clsag as clsag;
/// BorromeanRange struct, along with verifying functionality.
pub mod borromean;
/// Bulletproofs(+) structs, along with proving and verifying functionality.
pub mod bulletproofs;
pub use monero_bulletproofs as bulletproofs;
use crate::{
Protocol,

View File

@@ -1,108 +0,0 @@
use hex_literal::hex;
use rand_core::OsRng;
use curve25519_dalek::scalar::Scalar;
use monero_io::decompress_point;
use multiexp::BatchVerifier;
use crate::{
Commitment,
ringct::bulletproofs::{Bulletproof, original::OriginalStruct},
wallet::TransactionError,
};
mod plus;
#[test]
fn bulletproofs_vector() {
let scalar = |scalar| Scalar::from_canonical_bytes(scalar).unwrap();
let point = |point| decompress_point(point).unwrap();
// Generated from Monero
assert!(Bulletproof::Original(OriginalStruct {
A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
T2: point(hex!("b8390aa4b60b255630d40e592f55ec6b7ab5e3a96bfcdcd6f1cd1d2fc95f441e")),
taux: scalar(hex!("5957dba8ea9afb23d6e81cc048a92f2d502c10c749dc1b2bd148ae8d41ec7107")),
mu: scalar(hex!("923023b234c2e64774b820b4961f7181f6c1dc152c438643e5a25b0bf271bc02")),
L: vec![
point(hex!("c45f656316b9ebf9d357fb6a9f85b5f09e0b991dd50a6e0ae9b02de3946c9d99")),
point(hex!("9304d2bf0f27183a2acc58cc755a0348da11bd345485fda41b872fee89e72aac")),
point(hex!("1bb8b71925d155dd9569f64129ea049d6149fdc4e7a42a86d9478801d922129b")),
point(hex!("5756a7bf887aa72b9a952f92f47182122e7b19d89e5dd434c747492b00e1c6b7")),
point(hex!("6e497c910d102592830555356af5ff8340e8d141e3fb60ea24cfa587e964f07d")),
point(hex!("f4fa3898e7b08e039183d444f3d55040f3c790ed806cb314de49f3068bdbb218")),
point(hex!("0bbc37597c3ead517a3841e159c8b7b79a5ceaee24b2a9a20350127aab428713")),
],
R: vec![
point(hex!("609420ba1702781692e84accfd225adb3d077aedc3cf8125563400466b52dbd9")),
point(hex!("fb4e1d079e7a2b0ec14f7e2a3943bf50b6d60bc346a54fcf562fb234b342abf8")),
point(hex!("6ae3ac97289c48ce95b9c557289e82a34932055f7f5e32720139824fe81b12e5")),
point(hex!("d071cc2ffbdab2d840326ad15f68c01da6482271cae3cf644670d1632f29a15c")),
point(hex!("e52a1754b95e1060589ba7ce0c43d0060820ebfc0d49dc52884bc3c65ad18af5")),
point(hex!("41573b06140108539957df71aceb4b1816d2409ce896659aa5c86f037ca5e851")),
point(hex!("a65970b2cc3c7b08b2b5b739dbc8e71e646783c41c625e2a5b1535e3d2e0f742")),
],
a: scalar(hex!("0077c5383dea44d3cd1bc74849376bd60679612dc4b945255822457fa0c0a209")),
b: scalar(hex!("fe80cf5756473482581e1d38644007793ddc66fdeb9404ec1689a907e4863302")),
t: scalar(hex!("40dfb08e09249040df997851db311bd6827c26e87d6f0f332c55be8eef10e603"))
})
.verify(
&mut OsRng,
&[
// For some reason, these vectors are * INV_EIGHT
point(hex!("8e8f23f315edae4f6c2f948d9a861e0ae32d356b933cd11d2f0e031ac744c41f"))
.mul_by_cofactor(),
point(hex!("2829cbd025aa54cd6e1b59a032564f22f0b2e5627f7f2c4297f90da438b5510f"))
.mul_by_cofactor(),
]
));
}
macro_rules! bulletproofs_tests {
($name: ident, $max: ident, $plus: literal) => {
#[test]
fn $name() {
// Create Bulletproofs for all possible output quantities
let mut verifier = BatchVerifier::new(16);
for i in 1 ..= 16 {
let commitments = (1 ..= i)
.map(|i| Commitment::new(Scalar::random(&mut OsRng), u64::try_from(i).unwrap()))
.collect::<Vec<_>>();
let bp = if $plus {
Bulletproof::prove_plus(&mut OsRng, commitments.clone()).unwrap()
} else {
Bulletproof::prove(&mut OsRng, &commitments).unwrap()
};
let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
assert!(bp.verify(&mut OsRng, &commitments));
assert!(bp.batch_verify(&mut OsRng, &mut verifier, i, &commitments));
}
assert!(verifier.verify_vartime());
}
#[test]
fn $max() {
// Check that proving errors if we try to prove for too many outputs
let mut commitments = vec![];
for _ in 0 .. 17 {
commitments.push(Commitment::new(Scalar::ZERO, 0));
}
assert_eq!(
(if $plus {
Bulletproof::prove_plus(&mut OsRng, commitments)
} else {
Bulletproof::prove(&mut OsRng, &commitments)
})
.unwrap_err(),
TransactionError::TooManyOutputs,
);
}
};
}
bulletproofs_tests!(bulletproofs, bulletproofs_max, false);
bulletproofs_tests!(bulletproofs_plus, bulletproofs_plus_max, true);

View File

@@ -1,30 +0,0 @@
use rand_core::{RngCore, OsRng};
use multiexp::BatchVerifier;
use group::ff::Field;
use dalek_ff_group::Scalar;
use crate::{
Commitment,
ringct::bulletproofs::plus::aggregate_range_proof::{
AggregateRangeStatement, AggregateRangeWitness,
},
};
#[test]
fn test_aggregate_range_proof() {
let mut verifier = BatchVerifier::new(16);
for m in 1 ..= 16 {
let mut commitments = vec![];
for _ in 0 .. m {
commitments.push(Commitment::new(*Scalar::random(&mut OsRng), OsRng.next_u64()));
}
let commitment_points = commitments.iter().map(Commitment::calculate).collect();
let statement = AggregateRangeStatement::new(commitment_points).unwrap();
let witness = AggregateRangeWitness::new(commitments).unwrap();
let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
statement.verify(&mut OsRng, &mut verifier, (), proof);
}
assert!(verifier.verify_vartime());
}

View File

@@ -1,4 +0,0 @@
#[cfg(test)]
mod weighted_inner_product;
#[cfg(test)]
mod aggregate_range_proof;

View File

@@ -1,81 +0,0 @@
// The inner product relation is P = sum(g_bold * a, h_bold * b, g * (a * y * b), h * alpha)
use rand_core::OsRng;
use multiexp::BatchVerifier;
use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators,
weighted_inner_product::{WipStatement, WipWitness},
};
#[test]
fn test_zero_weighted_inner_product() {
#[allow(non_snake_case)]
let P = EdwardsPoint::identity();
let y = Scalar::random(&mut OsRng);
let generators = Generators::new().reduce(1);
let statement = WipStatement::new(generators, P, y);
let witness = WipWitness::new(ScalarVector::new(1), ScalarVector::new(1), Scalar::ZERO).unwrap();
let transcript = Scalar::random(&mut OsRng);
let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
let mut verifier = BatchVerifier::new(1);
statement.verify(&mut OsRng, &mut verifier, (), transcript, proof);
assert!(verifier.verify_vartime());
}
#[test]
fn test_weighted_inner_product() {
// P = sum(g_bold * a, h_bold * b, g * (a * y * b), h * alpha)
let mut verifier = BatchVerifier::new(6);
let generators = Generators::new();
for i in [1, 2, 4, 8, 16, 32] {
let generators = generators.reduce(i);
let g = Generators::g();
let h = Generators::h();
assert_eq!(generators.len(), i);
let mut g_bold = vec![];
let mut h_bold = vec![];
for i in 0 .. i {
g_bold.push(generators.generator(GeneratorsList::GBold1, i));
h_bold.push(generators.generator(GeneratorsList::HBold1, i));
}
let g_bold = PointVector(g_bold);
let h_bold = PointVector(h_bold);
let mut a = ScalarVector::new(i);
let mut b = ScalarVector::new(i);
let alpha = Scalar::random(&mut OsRng);
let y = Scalar::random(&mut OsRng);
let mut y_vec = ScalarVector::new(g_bold.len());
y_vec[0] = y;
for i in 1 .. y_vec.len() {
y_vec[i] = y_vec[i - 1] * y;
}
for i in 0 .. i {
a[i] = Scalar::random(&mut OsRng);
b[i] = Scalar::random(&mut OsRng);
}
#[allow(non_snake_case)]
let P = g_bold.multiexp(&a) +
h_bold.multiexp(&b) +
(g * a.clone().weighted_inner_product(&b, &y_vec)) +
(h * alpha);
let statement = WipStatement::new(generators, P, y);
let witness = WipWitness::new(a, b, alpha).unwrap();
let transcript = Scalar::random(&mut OsRng);
let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
statement.verify(&mut OsRng, &mut verifier, (), transcript, proof);
}
assert!(verifier.verify_vartime());
}

View File

@@ -1,5 +1,4 @@
mod unreduced_scalar;
mod bulletproofs;
mod address;
mod seed;
mod extra;

View File

@@ -33,7 +33,7 @@ use crate::{
ringct::{
generate_key_image,
clsag::{ClsagError, ClsagContext, Clsag},
bulletproofs::{MAX_OUTPUTS, Bulletproof},
bulletproofs::{MAX_COMMITMENTS, Bulletproof},
RctBase, RctPrunable, RctSignatures,
},
transaction::{Input, Output, Timelock, TransactionPrefix, Transaction},
@@ -504,7 +504,7 @@ impl SignableTransaction {
let out_amount = payments.iter().map(|payment| payment.1).sum::<u64>();
let outputs = payments.len() + usize::from(change.address.is_some());
if outputs > MAX_OUTPUTS {
if outputs > MAX_COMMITMENTS {
Err(TransactionError::TooManyOutputs)?;
}
@@ -803,7 +803,7 @@ impl SignableTransaction {
let commitments = outputs.iter().map(|output| output.commitment.clone()).collect::<Vec<_>>();
let sum = commitments.iter().map(|commitment| commitment.mask).sum();
// Safe due to the constructor checking MAX_OUTPUTS
// Safe due to the constructor checking MAX_COMMITMENTS
let bp = if self.protocol.bp_plus() {
Bulletproof::prove_plus(rng, commitments.clone()).unwrap()
} else {