mirror of
https://github.com/serai-dex/serai.git
Monero: add more legacy verify functions (#383)
* Add v1 ring sig verifying

* Allow calculating the signature hash for v1 txs

* Add an unreduced scalar type with recovery

  I added this type for Borromean sigs. The ee field can stay a normal scalar: the verify function checks ee against a reduced scalar, so for a signature to verify as correct, ee must be reduced.

* Change block major/minor versions to u8

  This matches Monero. I also changed a couple of varint functions to accept the `VarInt` trait.

* Expose `serialize_hashable` on `Block`

* Add back MLSAG verifying functions

  I still need to revert the commit removing support for >1 input MLSAG Full. This adds a new RCT type to separate Full and Simple RCT.

* Add back support for multiple inputs for RCT Full

* Comment the `non_adjacent_form` function

  I also added `#[allow(clippy::needless_range_loop)]` around a loop, as satisfying clippy there without a rewrite would make the function worse.

* Improve the Mlsag verifying API

* Fix rebase errors

* Revert the changes to `reserialize_chain`, plus other misc changes

* Fix no-std

* Reduce the amount of RPC calls needed for `get_block_by_number`

  This function was causing me problems: every now and then, a node would return a block with a different number than requested.

* Change `serialize_hashable` to give the PoW hashing blob

  Monero calculates the PoW hash and the block hash using *slightly* different blobs :/

* Make ring_signatures public and add a length check when verifying

* Misc improvements and bug fixes

---------

Co-authored-by: Luke Parker <lukeparker5132@gmail.com>
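As orientation for the diff below, here is a minimal sketch of the RCTTypeFull (aggregate MLSAG) verification flow this commit restores. It assumes the `AggregateRingMatrixBuilder`, `RingMatrix`, `Mlsag`, and `MlsagError` items added in the diff are reachable at `monero_serai::ringct::mlsag` and that MLSAG verification is compiled in (it may be feature-gated); `verify_full` and its inputs are hypothetical.

// Illustrative sketch under the assumptions above; not code from this commit.
use curve25519_dalek::edwards::EdwardsPoint;
use monero_serai::ringct::mlsag::{AggregateRingMatrixBuilder, Mlsag, MlsagError};

fn verify_full(
  sig_hash: &[u8; 32],
  ring: &[[EdwardsPoint; 2]],       // [output key, amount commitment] per ring member
  out_commitments: &[EdwardsPoint], // this transaction's output commitments
  fee: u64,
  key_image: EdwardsPoint,
  mlsag: &Mlsag,
) -> Result<(), MlsagError> {
  // Each ring row gains one extra column: the sum of that row's input commitments minus
  // (sum of output commitments + fee * H). For the real spend this is a commitment to zero.
  let mut builder = AggregateRingMatrixBuilder::new(out_commitments, fee);
  builder.push_ring(ring)?;
  // One key image per linkable layer; the aggregate amount column is the single
  // non-linkable layer, hence member_len() == key_images.len() + 1.
  mlsag.verify(sig_hash, &builder.build()?, &[key_image])
}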
@@ -1,73 +1,63 @@
 use core::fmt::Debug;
 use std_shims::io::{self, Read, Write};

-use curve25519_dalek::edwards::EdwardsPoint;
-#[cfg(feature = "experimental")]
-use curve25519_dalek::{traits::Identity, scalar::Scalar};
+use curve25519_dalek::{traits::Identity, Scalar, EdwardsPoint};

 #[cfg(feature = "experimental")]
 use monero_generators::H_pow_2;
-#[cfg(feature = "experimental")]
-use crate::hash_to_scalar;
-use crate::serialize::*;

+use crate::{hash_to_scalar, unreduced_scalar::UnreducedScalar, serialize::*};

 /// 64 Borromean ring signatures.
 ///
-/// This type keeps the data as raw bytes as Monero has some transactions with unreduced scalars in
-/// this field. While we could use `from_bytes_mod_order`, we'd then not be able to encode this
-/// back into it's original form.
-///
-/// Those scalars also have a custom reduction algorithm...
+/// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they were reduced.
+/// `UnreducedScalar` preserves their original byte encoding and implements a custom reduction
+/// algorithm which was in use.
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct BorromeanSignatures {
-  pub s0: [[u8; 32]; 64],
-  pub s1: [[u8; 32]; 64],
-  pub ee: [u8; 32],
+  pub s0: [UnreducedScalar; 64],
+  pub s1: [UnreducedScalar; 64],
+  pub ee: Scalar,
 }

 impl BorromeanSignatures {
   pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
     Ok(BorromeanSignatures {
-      s0: read_array(read_bytes, r)?,
-      s1: read_array(read_bytes, r)?,
-      ee: read_bytes(r)?,
+      s0: read_array(UnreducedScalar::read, r)?,
+      s1: read_array(UnreducedScalar::read, r)?,
+      ee: read_scalar(r)?,
     })
   }

   pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
     for s0 in &self.s0 {
-      w.write_all(s0)?;
+      s0.write(w)?;
     }
     for s1 in &self.s1 {
-      w.write_all(s1)?;
+      s1.write(w)?;
     }
-    w.write_all(&self.ee)
+    write_scalar(&self.ee, w)
   }

   #[cfg(feature = "experimental")]
   fn verify(&self, keys_a: &[EdwardsPoint], keys_b: &[EdwardsPoint]) -> bool {
     let mut transcript = [0; 2048];

     for i in 0 .. 64 {
-      // TODO: These aren't the correct reduction
+      // TODO: Can either of these be tightened?
       #[allow(non_snake_case)]
       let LL = EdwardsPoint::vartime_double_scalar_mul_basepoint(
-        &Scalar::from_bytes_mod_order(self.ee),
+        &self.ee,
         &keys_a[i],
-        &Scalar::from_bytes_mod_order(self.s0[i]),
+        &self.s0[i].recover_monero_slide_scalar(),
       );
       #[allow(non_snake_case)]
       let LV = EdwardsPoint::vartime_double_scalar_mul_basepoint(
         &hash_to_scalar(LL.compress().as_bytes()),
         &keys_b[i],
-        &Scalar::from_bytes_mod_order(self.s1[i]),
+        &self.s1[i].recover_monero_slide_scalar(),
       );
-      transcript[i .. ((i + 1) * 32)].copy_from_slice(LV.compress().as_bytes());
+      transcript[(i * 32) .. ((i + 1) * 32)].copy_from_slice(LV.compress().as_bytes());
     }

-    // TODO: This isn't the correct reduction
-    hash_to_scalar(&transcript) == Scalar::from_bytes_mod_order(self.ee)
+    // TODO: Can this be tightened to from_canonical_bytes?
+    hash_to_scalar(&transcript) == self.ee
   }
 }
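A hedged sketch of why `UnreducedScalar` matters here: re-serializing a historical Borromean signature must reproduce its exact bytes even when s0/s1 were never reduced. `blob`, `reserialize`, and the `monero_serai::ringct::borromean` path are assumptions for illustration, as is building with the std feature so `std::io` types apply.

// Illustrative sketch under the assumptions above; not code from this commit.
use monero_serai::ringct::borromean::BorromeanSignatures;

fn reserialize(blob: &[u8]) -> std::io::Result<Vec<u8>> {
  // 64 s0 scalars + 64 s1 scalars + ee = 129 * 32 bytes.
  let mut reader: &[u8] = blob;
  let sigs = BorromeanSignatures::read(&mut reader)?;
  let mut out = Vec::with_capacity(blob.len());
  sigs.write(&mut out)?;
  // Had the scalars been loaded with from_bytes_mod_order, unreduced s0/s1 values would
  // re-encode differently; UnreducedScalar keeps the original encoding, so this holds.
  debug_assert_eq!(blob, &out[..]);
  Ok(out)
}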
@@ -90,7 +80,6 @@ impl BorromeanRange {
     write_raw_vec(write_point, &self.bit_commitments, w)
   }

   #[cfg(feature = "experimental")]
   pub fn verify(&self, commitment: &EdwardsPoint) -> bool {
     if &self.bit_commitments.iter().sum::<EdwardsPoint>() != commitment {
       return false;
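The sum check above relies on the standard Borromean range proof layout, where the i-th bit commitment is C_i = x_i*G + b_i*2^i*H and the 64 commitments sum to the amount commitment. A hedged, self-contained sketch of that identity follows; the helper and its names are illustrative, with `h` standing in for Monero's H generator.

// Illustrative sketch of the identity; not code from this commit.
use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT as G,
  edwards::EdwardsPoint,
  scalar::Scalar,
  traits::Identity,
};

// Returns (sum of per-bit commitments, commitment to the full amount); for a well-formed
// proof these are equal, which is exactly what the verify function checks first.
fn bit_commitment_sum(
  blinds: &[Scalar; 64],
  amount: u64,
  h: EdwardsPoint,
) -> (EdwardsPoint, EdwardsPoint) {
  let mut sum = EdwardsPoint::identity();
  let mut total_blind = Scalar::from(0u8);
  for i in 0 .. 64 {
    let bit = (amount >> i) & 1;
    // C_i = x_i*G + (b_i * 2^i)*H
    sum += (blinds[i] * G) + (Scalar::from(bit << i) * h);
    total_blind += blinds[i];
  }
  (sum, (total_blind * G) + (Scalar::from(amount) * h))
}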
@@ -180,7 +180,7 @@ fn core(
     let c_c = mu_C * c;

     let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
-    let PH = hash_to_point(P[i]);
+    let PH = hash_to_point(&P[i]);
     // Shouldn't be an issue as all of the variables in this vartime statement are public
     let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

@@ -219,7 +219,7 @@ impl Clsag {
     let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
     let z = input.commitment.mask - mask;

-    let H = hash_to_point(input.decoys.ring[r][0]);
+    let H = hash_to_point(&input.decoys.ring[r][0]);
     let D = H * z;
     let mut s = Vec::with_capacity(input.decoys.ring.len());
     for _ in 0 .. input.decoys.ring.len() {

@@ -259,7 +259,7 @@ impl Clsag {
        &msg,
        nonce.deref() * ED25519_BASEPOINT_TABLE,
        nonce.deref() *
-          hash_to_point(inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
+          hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
      );
      clsag.s[usize::from(inputs[i].2.decoys.i)] =
        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();

@@ -116,7 +116,7 @@ impl ClsagMultisig {
    ClsagMultisig {
      transcript,

-      H: hash_to_point(output_key),
+      H: hash_to_point(&output_key),
      image: EdwardsPoint::identity(),

      details,

@@ -3,6 +3,6 @@ use curve25519_dalek::edwards::EdwardsPoint;
 pub use monero_generators::{hash_to_point as raw_hash_to_point};

 /// Monero's hash to point function, as named `ge_fromfe_frombytes_vartime`.
-pub fn hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
+pub fn hash_to_point(key: &EdwardsPoint) -> EdwardsPoint {
   raw_hash_to_point(key.compress().to_bytes())
 }
@@ -3,17 +3,82 @@ use std_shims::{
   io::{self, Read, Write},
 };

-use curve25519_dalek::scalar::Scalar;
-#[cfg(feature = "experimental")]
-use curve25519_dalek::edwards::EdwardsPoint;
+use zeroize::Zeroize;

-use crate::serialize::*;
-#[cfg(feature = "experimental")]
-use crate::{hash_to_scalar, ringct::hash_to_point};
+use curve25519_dalek::{traits::IsIdentity, Scalar, EdwardsPoint};

-#[derive(Clone, PartialEq, Eq, Debug)]
+use monero_generators::H;

+use crate::{hash_to_scalar, ringct::hash_to_point, serialize::*};

+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[cfg_attr(feature = "std", derive(thiserror::Error))]
+pub enum MlsagError {
+  #[cfg_attr(feature = "std", error("invalid ring"))]
+  InvalidRing,
+  #[cfg_attr(feature = "std", error("invalid amount of key images"))]
+  InvalidAmountOfKeyImages,
+  #[cfg_attr(feature = "std", error("invalid ss"))]
+  InvalidSs,
+  #[cfg_attr(feature = "std", error("key image was identity"))]
+  IdentityKeyImage,
+  #[cfg_attr(feature = "std", error("invalid ci"))]
+  InvalidCi,
+}

+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub struct RingMatrix {
+  matrix: Vec<Vec<EdwardsPoint>>,
+}

+impl RingMatrix {
+  pub fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
+    if matrix.is_empty() {
+      Err(MlsagError::InvalidRing)?;
+    }
+    for member in &matrix {
+      if member.is_empty() || (member.len() != matrix[0].len()) {
+        Err(MlsagError::InvalidRing)?;
+      }
+    }

+    Ok(RingMatrix { matrix })
+  }

+  /// Construct a ring matrix for an individual output.
+  pub fn individual(
+    ring: &[[EdwardsPoint; 2]],
+    pseudo_out: EdwardsPoint,
+  ) -> Result<Self, MlsagError> {
+    let mut matrix = Vec::with_capacity(ring.len());
+    for ring_member in ring {
+      matrix.push(vec![ring_member[0], ring_member[1] - pseudo_out]);
+    }
+    RingMatrix::new(matrix)
+  }

+  pub fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
+    self.matrix.iter().map(AsRef::as_ref)
+  }

+  /// Return the amount of members in the ring.
+  pub fn members(&self) -> usize {
+    self.matrix.len()
+  }

+  /// Returns the length of a ring member.
+  ///
+  /// A ring member is a vector of points for which the signer knows all of the discrete logarithms
+  /// of.
+  pub fn member_len(&self) -> usize {
+    // this is safe to do as the constructors don't allow empty rings
+    self.matrix[0].len()
+  }
+}

+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
 pub struct Mlsag {
-  pub ss: Vec<[Scalar; 2]>,
+  pub ss: Vec<Vec<Scalar>>,
   pub cc: Scalar,
 }
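A small, hypothetical usage sketch of the `RingMatrix` type added above, for the per-input (RCTTypeSimple-style) case; `simple_matrix` and the `monero_serai::ringct::mlsag` path are assumptions.

// Illustrative sketch under the assumptions above; not code from this commit.
use curve25519_dalek::edwards::EdwardsPoint;
use monero_serai::ringct::mlsag::{MlsagError, RingMatrix};

fn simple_matrix(
  ring: &[[EdwardsPoint; 2]],
  pseudo_out: EdwardsPoint,
) -> Result<RingMatrix, MlsagError> {
  // Column 0: the ring member's output key (linkable, so it gets a key image).
  // Column 1: the ring member's commitment minus the pseudo-out (the non-linkable layer).
  let matrix = RingMatrix::individual(ring, pseudo_out)?;
  debug_assert_eq!(matrix.members(), ring.len());
  debug_assert_eq!(matrix.member_len(), 2);
  Ok(matrix)
}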
@@ -25,47 +90,124 @@ impl Mlsag {
     write_scalar(&self.cc, w)
   }

-  pub fn read<R: Read>(mixins: usize, r: &mut R) -> io::Result<Mlsag> {
+  pub fn read<R: Read>(mixins: usize, ss_2_elements: usize, r: &mut R) -> io::Result<Mlsag> {
     Ok(Mlsag {
-      ss: (0 .. mixins).map(|_| read_array(read_scalar, r)).collect::<Result<_, _>>()?,
+      ss: (0 .. mixins)
+        .map(|_| read_raw_vec(read_scalar, ss_2_elements, r))
+        .collect::<Result<_, _>>()?,
       cc: read_scalar(r)?,
     })
   }

   #[cfg(feature = "experimental")]
   pub fn verify(
     &self,
     msg: &[u8; 32],
-    ring: &[[EdwardsPoint; 2]],
-    key_image: &EdwardsPoint,
-  ) -> bool {
-    if ring.is_empty() {
-      return false;
+    ring: &RingMatrix,
+    key_images: &[EdwardsPoint],
+  ) -> Result<(), MlsagError> {
+    // Mlsag allows for layers to not need linkability, hence they don't need key images
+    // Monero requires that there is always only 1 non-linkable layer - the amount commitments.
+    if ring.member_len() != (key_images.len() + 1) {
+      Err(MlsagError::InvalidAmountOfKeyImages)?;
     }

     let mut buf = Vec::with_capacity(6 * 32);
+    buf.extend_from_slice(msg);

     let mut ci = self.cc;
-    for (i, ring_member) in ring.iter().enumerate() {
-      buf.extend_from_slice(msg);

-      #[allow(non_snake_case)]
-      let L =
-        |r| EdwardsPoint::vartime_double_scalar_mul_basepoint(&ci, &ring_member[r], &self.ss[i][r]);
+    // This is an iterator over the key images as options with an added entry of `None` at the
+    // end for the non-linkable layer
+    let key_images_iter = key_images.iter().map(|ki| Some(*ki)).chain(core::iter::once(None));

-      buf.extend_from_slice(ring_member[0].compress().as_bytes());
-      buf.extend_from_slice(L(0).compress().as_bytes());

-      #[allow(non_snake_case)]
-      let R = (self.ss[i][0] * hash_to_point(ring_member[0])) + (ci * key_image);
-      buf.extend_from_slice(R.compress().as_bytes());

-      buf.extend_from_slice(ring_member[1].compress().as_bytes());
-      buf.extend_from_slice(L(1).compress().as_bytes());

-      ci = hash_to_scalar(&buf);
-      buf.clear();
+    if ring.matrix.len() != self.ss.len() {
+      Err(MlsagError::InvalidSs)?;
     }

-    ci == self.cc
+    for (ring_member, ss) in ring.iter().zip(&self.ss) {
+      if ring_member.len() != ss.len() {
+        Err(MlsagError::InvalidSs)?;
+      }

+      for ((ring_member_entry, s), ki) in ring_member.iter().zip(ss).zip(key_images_iter.clone()) {
+        #[allow(non_snake_case)]
+        let L = EdwardsPoint::vartime_double_scalar_mul_basepoint(&ci, ring_member_entry, s);

+        buf.extend_from_slice(ring_member_entry.compress().as_bytes());
+        buf.extend_from_slice(L.compress().as_bytes());

+        // Not all dimensions need to be linkable, e.g. commitments, and only linkable layers need
+        // to have key images.
+        if let Some(ki) = ki {
+          if ki.is_identity() {
+            Err(MlsagError::IdentityKeyImage)?;
+          }

+          #[allow(non_snake_case)]
+          let R = (s * hash_to_point(ring_member_entry)) + (ci * ki);
+          buf.extend_from_slice(R.compress().as_bytes());
+        }
+      }

+      ci = hash_to_scalar(&buf);
+      // keep the msg in the buffer.
+      buf.drain(msg.len() ..);
+    }

+    if ci != self.cc {
+      Err(MlsagError::InvalidCi)?
+    }
+    Ok(())
   }
 }
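A hedged sketch of how callers later in this diff choose the new `ss_2_elements` argument. The helper, its `decoys`/`aggregate` inputs, and the assumption that `std_shims::io::Read` is `std::io::Read` under the std feature are all illustrative.

// Illustrative sketch under the assumptions above; not code from this commit.
use monero_serai::ringct::mlsag::Mlsag;

fn read_mlsags<R: std::io::Read>(
  decoys: &[usize],
  aggregate: bool,
  r: &mut R,
) -> std::io::Result<Vec<Mlsag>> {
  if aggregate {
    // RCTTypeFull: one MLSAG for the whole transaction, over a matrix that is
    // (number of inputs + 1) columns wide, the extra column being the aggregated amounts.
    Ok(vec![Mlsag::read(decoys[0], decoys.len() + 1, r)?])
  } else {
    // RCTTypeSimple: one MLSAG per input, each over a 2-column [key, commitment] matrix.
    decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect()
  }
}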
+/// An aggregate ring matrix builder, usable to set up the ring matrix to prove/verify an aggregate
+/// MLSAG signature.
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub struct AggregateRingMatrixBuilder {
+  key_ring: Vec<Vec<EdwardsPoint>>,
+  amounts_ring: Vec<EdwardsPoint>,
+  sum_out: EdwardsPoint,
+}

+impl AggregateRingMatrixBuilder {
+  /// Create a new AggregateRingMatrixBuilder.
+  ///
+  /// Takes in the transaction's outputs; commitments and fee.
+  pub fn new(commitments: &[EdwardsPoint], fee: u64) -> Self {
+    AggregateRingMatrixBuilder {
+      key_ring: vec![],
+      amounts_ring: vec![],
+      sum_out: commitments.iter().sum::<EdwardsPoint>() + (H() * Scalar::from(fee)),
+    }
+  }

+  /// Push a ring of [output key, commitment] to the matrix.
+  pub fn push_ring(&mut self, ring: &[[EdwardsPoint; 2]]) -> Result<(), MlsagError> {
+    if self.key_ring.is_empty() {
+      self.key_ring = vec![vec![]; ring.len()];
+      // Now that we know the length of the ring, fill the `amounts_ring`.
+      self.amounts_ring = vec![-self.sum_out; ring.len()];
+    }

+    if (self.amounts_ring.len() != ring.len()) || ring.is_empty() {
+      // All the rings in an aggregate matrix must be the same length.
+      return Err(MlsagError::InvalidRing);
+    }

+    for (i, ring_member) in ring.iter().enumerate() {
+      self.key_ring[i].push(ring_member[0]);
+      self.amounts_ring[i] += ring_member[1]
+    }

+    Ok(())
+  }

+  /// Build and return the [`RingMatrix`]
+  pub fn build(mut self) -> Result<RingMatrix, MlsagError> {
+    for (i, amount_commitment) in self.amounts_ring.drain(..).enumerate() {
+      self.key_ring[i].push(amount_commitment);
+    }
+    RingMatrix::new(self.key_ring)
+  }
+}
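A hedged, self-contained sketch of the balance identity the aggregate column encodes, i.e. why `amounts_ring` starts at `-sum_out` and accumulates each ring member's commitment. The function name, `h`, and the mask/amount inputs are illustrative.

// Illustrative sketch of the identity; not code from this commit.
use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT as G,
  edwards::EdwardsPoint,
  scalar::Scalar,
};

// If the amounts balance (sum of inputs == sum of outputs + fee), the amount terms cancel
// and the column is a commitment to zero, i.e. z*G for z = sum(in masks) - sum(out masks).
// That discrete log is what the aggregate MLSAG proves knowledge of in its final layer.
fn aggregate_column_is_commitment_to_zero(
  in_masks: &[Scalar], in_amounts: &[u64],
  out_masks: &[Scalar], out_amounts: &[u64],
  fee: u64, h: EdwardsPoint,
) -> bool {
  let commit = |mask: &Scalar, amount: u64| (mask * G) + (Scalar::from(amount) * h);
  let sum_in: EdwardsPoint =
    in_masks.iter().zip(in_amounts).map(|(x, a)| commit(x, *a)).sum();
  let sum_out: EdwardsPoint =
    out_masks.iter().zip(out_amounts).map(|(x, a)| commit(x, *a)).sum();
  let column = sum_in - (sum_out + (Scalar::from(fee) * h));
  let z = in_masks.iter().sum::<Scalar>() - out_masks.iter().sum::<Scalar>();
  column == (z * G)
}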
@@ -28,7 +28,7 @@ use crate::{

 /// Generate a key image for a given key. Defined as `x * hash_to_point(xG)`.
 pub fn generate_key_image(secret: &Zeroizing<Scalar>) -> EdwardsPoint {
-  hash_to_point(ED25519_BASEPOINT_TABLE * secret.deref()) * secret.deref()
+  hash_to_point(&(ED25519_BASEPOINT_TABLE * secret.deref())) * secret.deref()
 }

 #[derive(Clone, PartialEq, Eq, Debug)]
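A tiny, hypothetical check restating the doc comment's definition of a key image, `x * hash_to_point(x * G)`; the `monero_serai::ringct` paths and `check` are assumptions.

// Illustrative sketch under the assumptions above; not code from this commit.
use core::ops::Deref;
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
use zeroize::Zeroizing;
use monero_serai::ringct::{generate_key_image, hash_to_point};

fn check(secret: &Zeroizing<Scalar>) -> bool {
  let public = ED25519_BASEPOINT_TABLE * secret.deref();
  // The key image binds the spend to this specific output key, enabling double-spend detection.
  generate_key_image(secret) == (hash_to_point(&public) * secret.deref())
}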
@@ -61,7 +61,7 @@ impl EncryptedAmount {
 pub enum RctType {
   /// No RCT proofs.
   Null,
-  /// One MLSAG for a single input and a Borromean range proof (RCTTypeFull).
+  /// One MLSAG for multiple inputs and Borromean range proofs (RCTTypeFull).
   MlsagAggregate,
   // One MLSAG for each input and a Borromean range proof (RCTTypeSimple).
   MlsagIndividual,

@@ -194,6 +194,10 @@ impl RctBase {
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub enum RctPrunable {
   Null,
+  AggregateMlsagBorromean {
+    borromean: Vec<BorromeanRange>,
+    mlsag: Mlsag,
+  },
   MlsagBorromean {
     borromean: Vec<BorromeanRange>,
     mlsags: Vec<Mlsag>,

@@ -220,6 +224,10 @@ impl RctPrunable {
   pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
     match self {
       RctPrunable::Null => Ok(()),
+      RctPrunable::AggregateMlsagBorromean { borromean, mlsag } => {
+        write_raw_vec(BorromeanRange::write, borromean, w)?;
+        mlsag.write(w)
+      }
      RctPrunable::MlsagBorromean { borromean, mlsags } => {
        write_raw_vec(BorromeanRange::write, borromean, w)?;
        write_raw_vec(Mlsag::write, mlsags, w)

@@ -270,9 +278,13 @@ impl RctPrunable {

     Ok(match rct_type {
       RctType::Null => RctPrunable::Null,
-      RctType::MlsagAggregate | RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
+      RctType::MlsagAggregate => RctPrunable::AggregateMlsagBorromean {
         borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
-        mlsags: decoys.iter().map(|d| Mlsag::read(*d, r)).collect::<Result<_, _>>()?,
+        mlsag: Mlsag::read(decoys[0], decoys.len() + 1, r)?,
       },
+      RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
+        borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
+        mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::<Result<_, _>>()?,
+      },
       RctType::Bulletproofs | RctType::BulletproofsCompactAmount => {
         RctPrunable::MlsagBulletproofs {

@@ -287,13 +299,13 @@ impl RctPrunable {
            }
            Bulletproofs::read(r)?
          },
-          mlsags: decoys.iter().map(|d| Mlsag::read(*d, r)).collect::<Result<_, _>>()?,
+          mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::<Result<_, _>>()?,
          pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?,
        }
      }
      RctType::Clsag | RctType::BulletproofsPlus => RctPrunable::Clsag {
        bulletproofs: {
-          if read_varint(r)? != 1 {
+          if read_varint::<_, u64>(r)? != 1 {
            Err(io::Error::new(io::ErrorKind::Other, "n bulletproofs instead of one"))?;
          }
          (if rct_type == RctType::Clsag { Bulletproofs::read } else { Bulletproofs::read_plus })(

@@ -309,6 +321,7 @@ impl RctPrunable {
   pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
     match self {
       RctPrunable::Null => panic!("Serializing RctPrunable::Null for a signature"),
+      RctPrunable::AggregateMlsagBorromean { borromean, .. } |
       RctPrunable::MlsagBorromean { borromean, .. } => {
         borromean.iter().try_for_each(|rs| rs.write(w))
       }

@@ -329,30 +342,8 @@ impl RctSignatures {
   pub fn rct_type(&self) -> RctType {
     match &self.prunable {
       RctPrunable::Null => RctType::Null,
-      RctPrunable::MlsagBorromean { .. } => {
-        /*
-          This type of RctPrunable may have no outputs, yet pseudo_outs are per input
-          This will only be a valid RctSignatures if it's for a TX with inputs
-          That makes this valid for any valid RctSignatures

-          While it will be invalid for any invalid RctSignatures, potentially letting an invalid
-          MlsagAggregate be interpreted as a valid MlsagIndividual (or vice versa), they have
-          incompatible deserializations

-          This means it's impossible to receive a MlsagAggregate over the wire and interpret it
-          as a MlsagIndividual (or vice versa)

-          That only makes manual manipulation unsafe, which will always be true since these fields
-          are all pub

-          TODO: Consider making them private with read-only accessors?
-        */
-        if self.base.pseudo_outs.is_empty() {
-          RctType::MlsagAggregate
-        } else {
-          RctType::MlsagIndividual
-        }
-      }
+      RctPrunable::AggregateMlsagBorromean { .. } => RctType::MlsagAggregate,
+      RctPrunable::MlsagBorromean { .. } => RctType::MlsagIndividual,
       // RctBase ensures there's at least one output, making the following
       // inferences guaranteed/expects impossible on any valid RctSignatures
       RctPrunable::MlsagBulletproofs { .. } => {