Further documentation, start shoring up API boundaries of existing crates

Luke Parker
2024-06-14 11:47:57 -04:00
parent 784a273747
commit 9c217913e6
16 changed files with 195 additions and 116 deletions

Cargo.lock generated
View File

@@ -4765,7 +4765,6 @@ dependencies = [
  "monero-primitives",
  "rand_chacha",
  "rand_core",
- "sha3",
  "std-shims",
  "subtle",
  "thiserror",
@@ -4801,7 +4800,6 @@ dependencies = [
  "curve25519-dalek",
  "monero-generators",
  "monero-io",
- "rand_core",
  "sha3",
  "std-shims",
  "zeroize",

View File

@@ -9,7 +9,7 @@ use monero_io::decompress_point;

 use crate::keccak256;

-/// Monero's hash to point function, as named `hash_to_ec`.
+/// Monero's `hash_to_ec` function.
 pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
   #[allow(non_snake_case)]
   let A = FieldElement::from(486662u64);

View File

@@ -24,7 +24,10 @@ fn keccak256(data: &[u8]) -> [u8; 32] {
 }

 static H_CELL: OnceLock<DalekPoint> = OnceLock::new();
-/// Monero's alternate generator `H`, used for amounts in Pedersen commitments.
+/// Monero's `H` generator.
+///
+/// Contrary to convention (`G` for values, `H` for randomness), `H` is used by Monero for amounts
+/// within Pedersen commitments.
 #[allow(non_snake_case)]
 pub fn H() -> DalekPoint {
   *H_CELL.get_or_init(|| {
@@ -33,7 +36,9 @@ pub fn H() -> DalekPoint {
 }

 static H_POW_2_CELL: OnceLock<[DalekPoint; 64]> = OnceLock::new();
-/// Monero's alternate generator `H`, multiplied by 2**i for i in 1 ..= 64.
+/// Monero's `H` generator, multiplied by 2**i for i in 1 ..= 64.
+///
+/// This table is useful when working with amounts, which are u64s.
 #[allow(non_snake_case)]
 pub fn H_pow_2() -> &'static [DalekPoint; 64] {
   H_POW_2_CELL.get_or_init(|| {
@@ -45,8 +50,11 @@ pub fn H_pow_2() -> &'static [DalekPoint; 64] {
   })
 }

+// The maximum amount of commitments proven for within a single range proof.
 const MAX_M: usize = 16;
+// The amount of bits the value within a commitment may use.
 const N: usize = 64;
+// The maximum amount of bits used within a single range proof.
 const MAX_MN: usize = MAX_M * N;

 /// Container struct for Bulletproofs(+) generators.
@@ -57,18 +65,24 @@ pub struct Generators {
 }

 /// Generate generators as needed for Bulletproofs(+), as Monero does.
+///
+/// Consumers should not call this function ad-hoc, yet call it within a build script or use a
+/// once-initialized static.
 pub fn bulletproofs_generators(dst: &'static [u8]) -> Generators {
+  let mut preimage = H().compress().to_bytes().to_vec();
+  preimage.extend(dst);
+
   let mut res = Generators { G: Vec::with_capacity(MAX_MN), H: Vec::with_capacity(MAX_MN) };
   for i in 0 .. MAX_MN {
+    // We generate a pair of generators per iteration
     let i = 2 * i;
-    let mut even = H().compress().to_bytes().to_vec();
-    even.extend(dst);
-    let mut odd = even.clone();
-
-    write_varint::<Vec<u8>, u64>(&i.try_into().unwrap(), &mut even).unwrap();
-    write_varint::<Vec<u8>, u64>(&(i + 1).try_into().unwrap(), &mut odd).unwrap();
+
+    let mut even = preimage.clone();
+    write_varint(&i, &mut even).unwrap();
     res.H.push(EdwardsPoint(hash_to_point(keccak256(&even))));
+
+    let mut odd = preimage.clone();
+    write_varint(&(i + 1), &mut odd).unwrap();
     res.G.push(EdwardsPoint(hash_to_point(keccak256(&odd))));
   }
   res
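The new doc comment on bulletproofs_generators asks callers not to recompute this table ad-hoc. A minimal sketch of the suggested caching pattern, assuming `Generators` and `bulletproofs_generators` are the crate's public exports; the domain-separation tag below is an illustrative value, not one defined by this commit:

use std::sync::OnceLock;

use monero_generators::{Generators, bulletproofs_generators};

// Cache the (expensive) generator table in a once-initialized static, as the
// doc comment above recommends.
static BP_GENERATORS: OnceLock<Generators> = OnceLock::new();

fn bp_generators() -> &'static Generators {
  // The DST is an example value chosen for this sketch.
  BP_GENERATORS.get_or_init(|| bulletproofs_generators(b"example_dst"))
}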

View File

@@ -1,38 +0,0 @@
use crate::{decompress_point, hash_to_point};
#[test]
fn crypto_tests() {
// tests.txt file copied from monero repo
// https://github.com/monero-project/monero/
// blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/tests/crypto/tests.txt
let reader = include_str!("./tests.txt");
for line in reader.lines() {
let mut words = line.split_whitespace();
let command = words.next().unwrap();
match command {
"check_key" => {
let key = words.next().unwrap();
let expected = match words.next().unwrap() {
"true" => true,
"false" => false,
_ => unreachable!("invalid result"),
};
let actual = decompress_point(hex::decode(key).unwrap().try_into().unwrap());
assert_eq!(actual.is_some(), expected);
}
"hash_to_ec" => {
let bytes = words.next().unwrap();
let expected = words.next().unwrap();
let actual = hash_to_point(hex::decode(bytes).unwrap().try_into().unwrap());
assert_eq!(hex::encode(actual.compress().to_bytes()), expected);
}
_ => unreachable!("unknown command"),
}
}
}

View File

@@ -1 +1,36 @@
-mod hash_to_point;
+use crate::{decompress_point, hash_to_point};
+
+#[test]
+fn test_vectors() {
+  // tests.txt file copied from monero repo
+  // https://github.com/monero-project/monero/
+  // blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/tests/crypto/tests.txt
+  let reader = include_str!("./tests.txt");
+
+  for line in reader.lines() {
+    let mut words = line.split_whitespace();
+    let command = words.next().unwrap();
+
+    match command {
+      "check_key" => {
+        let key = words.next().unwrap();
+        let expected = match words.next().unwrap() {
+          "true" => true,
+          "false" => false,
+          _ => unreachable!("invalid result"),
+        };
+
+        let actual = decompress_point(hex::decode(key).unwrap().try_into().unwrap());
+        assert_eq!(actual.is_some(), expected);
+      }
+
+      "hash_to_ec" => {
+        let bytes = words.next().unwrap();
+        let expected = words.next().unwrap();
+        let actual = hash_to_point(hex::decode(bytes).unwrap().try_into().unwrap());
+        assert_eq!(hex::encode(actual.compress().to_bytes()), expected);
+      }
+
+      _ => unreachable!("unknown command"),
+    }
+  }
+}

View File

@@ -17,9 +17,13 @@ use curve25519_dalek::{
 const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;

 mod sealed {
+  /// A trait for a number readable/writable as a VarInt.
+  ///
+  /// This is sealed to prevent unintended implementations.
   pub trait VarInt: TryInto<u64> + TryFrom<u64> + Copy {
     const BITS: usize;
   }
   impl VarInt for u8 {
     const BITS: usize = 8;
   }
@@ -34,17 +38,24 @@ mod sealed {
   }
 }

-// This will panic if the VarInt exceeds u64::MAX
-pub fn varint_len<U: sealed::VarInt>(varint: U) -> usize {
+/// The amount of bytes this number will take when serialized as a VarInt.
+///
+/// This function will panic if the VarInt exceeds u64::MAX.
+pub fn varint_len<V: sealed::VarInt>(varint: V) -> usize {
   let varint_u64: u64 = varint.try_into().map_err(|_| "varint exceeded u64").unwrap();
   ((usize::try_from(u64::BITS - varint_u64.leading_zeros()).unwrap().saturating_sub(1)) / 7) + 1
 }

+/// Write a byte.
+///
+/// This is used as a building block within generic functions.
 pub fn write_byte<W: Write>(byte: &u8, w: &mut W) -> io::Result<()> {
   w.write_all(&[*byte])
 }

-// This will panic if the VarInt exceeds u64::MAX
+/// Write a number, VarInt-encoded.
+///
+/// This will panic if the VarInt exceeds u64::MAX.
 pub fn write_varint<W: Write, U: sealed::VarInt>(varint: &U, w: &mut W) -> io::Result<()> {
   let mut varint: u64 = (*varint).try_into().map_err(|_| "varint exceeded u64").unwrap();
   while {
@@ -59,14 +70,17 @@ pub fn write_varint<W: Write, U: sealed::VarInt>(varint: &U, w: &mut W) -> io::R
   Ok(())
 }

+/// Write a scalar.
 pub fn write_scalar<W: Write>(scalar: &Scalar, w: &mut W) -> io::Result<()> {
   w.write_all(&scalar.to_bytes())
 }

+/// Write a point.
 pub fn write_point<W: Write>(point: &EdwardsPoint, w: &mut W) -> io::Result<()> {
   w.write_all(&point.compress().to_bytes())
 }

+/// Write a list of elements, without length-prefixing.
 pub fn write_raw_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
   f: F,
   values: &[T],
@@ -78,6 +92,7 @@ pub fn write_raw_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
   Ok(())
 }

+/// Write a list of elements, with length-prefixing.
 pub fn write_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
   f: F,
   values: &[T],
@@ -87,28 +102,34 @@ pub fn write_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
   write_raw_vec(f, values, w)
 }

+/// Read a constant amount of bytes.
 pub fn read_bytes<R: Read, const N: usize>(r: &mut R) -> io::Result<[u8; N]> {
   let mut res = [0; N];
   r.read_exact(&mut res)?;
   Ok(res)
 }

+/// Read a single byte.
 pub fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> {
   Ok(read_bytes::<_, 1>(r)?[0])
 }

+/// Read a u16, little-endian encoded.
 pub fn read_u16<R: Read>(r: &mut R) -> io::Result<u16> {
   read_bytes(r).map(u16::from_le_bytes)
 }

+/// Read a u32, little-endian encoded.
 pub fn read_u32<R: Read>(r: &mut R) -> io::Result<u32> {
   read_bytes(r).map(u32::from_le_bytes)
 }

+/// Read a u64, little-endian encoded.
 pub fn read_u64<R: Read>(r: &mut R) -> io::Result<u64> {
   read_bytes(r).map(u64::from_le_bytes)
 }

+/// Read a canonically-encoded VarInt.
 pub fn read_varint<R: Read, U: sealed::VarInt>(r: &mut R) -> io::Result<U> {
   let mut bits = 0;
   let mut res = 0;
@@ -128,20 +149,24 @@ pub fn read_varint<R: Read, U: sealed::VarInt>(r: &mut R) -> io::Result<U> {
   res.try_into().map_err(|_| io::Error::other("VarInt does not fit into integer type"))
 }

-// All scalar fields supported by monero-serai are checked to be canonical for valid transactions
-// While from_bytes_mod_order would be more flexible, it's not currently needed and would be
-// inaccurate to include now. While casting a wide net may be preferable, it'd also be inaccurate
-// for now. There's also further edge cases as noted by
-// https://github.com/monero-project/monero/issues/8438, where some scalars had an archaic
-// reduction applied
+/// Read a canonically-encoded scalar.
+///
+/// Some scalars within the Monero protocol are not enforced to be canonically encoded. For such
+/// scalars, they should be represented as `[u8; 32]` and later converted to scalars as relevant.
 pub fn read_scalar<R: Read>(r: &mut R) -> io::Result<Scalar> {
   Option::from(Scalar::from_canonical_bytes(read_bytes(r)?))
     .ok_or_else(|| io::Error::other("unreduced scalar"))
 }

-/// Decompress a canonically encoded ed25519 point.
+/// Decompress a canonically-encoded Ed25519 point.
 ///
-/// This function does not check if the point is within the prime order subgroup.
+/// Ed25519 is of order `8 * l`. This function ensures each of those `8 * l` points has a singular
+/// encoding by checking points aren't encoded with an unreduced field element, and aren't negative
+/// when the negative is equivalent (0 == -0).
+///
+/// Since this decodes an Ed25519 point, it does not check the point is in the prime-order
+/// subgroup. Torsioned points do have a canonical encoding, and only aren't canonical when
+/// considered in relation to the prime-order subgroup.
 pub fn decompress_point(bytes: [u8; 32]) -> Option<EdwardsPoint> {
   CompressedEdwardsY(bytes)
     .decompress()
@@ -149,11 +174,16 @@ pub fn decompress_point(bytes: [u8; 32]) -> Option<EdwardsPoint> {
     .filter(|point| point.compress().to_bytes() == bytes)
 }

+/// Read a canonically-encoded Ed25519 point.
+///
+/// This internally calls `decompress_point` and has the same definition of canonicity. This
+/// function does not check the resulting point is within the prime-order subgroup.
 pub fn read_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
   let bytes = read_bytes(r)?;
   decompress_point(bytes).ok_or_else(|| io::Error::other("invalid point"))
 }

+/// Read a canonically-encoded Ed25519 point, within the prime-order subgroup.
 pub fn read_torsion_free_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
   read_point(r)
     .ok()
@@ -161,6 +191,7 @@ pub fn read_torsion_free_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
     .ok_or_else(|| io::Error::other("invalid point"))
 }

+/// Read a variable-length list of elements, without length-prefixing.
 pub fn read_raw_vec<R: Read, T, F: Fn(&mut R) -> io::Result<T>>(
   f: F,
   len: usize,
@@ -173,6 +204,7 @@ pub fn read_raw_vec<R: Read, T, F: Fn(&mut R) -> io::Result<T>>(
   Ok(res)
 }

+/// Read a constant-length list of elements.
 pub fn read_array<R: Read, T: Debug, F: Fn(&mut R) -> io::Result<T>, const N: usize>(
   f: F,
   r: &mut R,
@@ -180,6 +212,7 @@ pub fn read_array<R: Read, T: Debug, F: Fn(&mut R) -> io::Result<T>, const N: us
   read_raw_vec(f, N, r).map(|vec| vec.try_into().unwrap())
 }

+/// Read a length-prefixed variable-length list of elements.
 pub fn read_vec<R: Read, T, F: Fn(&mut R) -> io::Result<T>>(f: F, r: &mut R) -> io::Result<Vec<T>> {
   read_raw_vec(f, read_varint(r)?, r)
 }

View File

@@ -18,7 +18,6 @@ workspace = true
 [dependencies]
 std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }

-rand_core = { version = "0.6", default-features = false }
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

 # Cryptographic dependencies
@@ -33,7 +32,6 @@ monero-generators = { path = "../generators", version = "0.4", default-features
 std = [
   "std-shims/std",

-  "rand_core/std",
   "zeroize/std",
   "sha3/std",

View File

@@ -2,7 +2,7 @@
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

-use std_shims::{vec, vec::Vec};
+use std_shims::vec::Vec;

 #[cfg(feature = "std")]
 use std_shims::sync::OnceLock;
@@ -16,25 +16,25 @@ use curve25519_dalek::{
   edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
 };

-use monero_io::varint_len;
 use monero_generators::H;

-// TODO: Replace this with a const
+// On std, we cache some variables in statics.
 #[cfg(feature = "std")]
 static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
 #[cfg(feature = "std")]
 #[allow(non_snake_case)]
+/// The inverse of 8 over l.
 pub fn INV_EIGHT() -> Scalar {
   *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
 }
+// In no-std environments, we prefer the reduced memory use and calculate it ad-hoc.
 #[cfg(not(feature = "std"))]
 #[allow(non_snake_case)]
+/// The inverse of 8 over l.
 pub fn INV_EIGHT() -> Scalar {
   Scalar::from(8u8).invert()
 }

-// On std, we cache this in a static
-// In no-std environments, we prefer the reduced memory use and calculate it ad-hoc
 #[cfg(feature = "std")]
 static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
 #[cfg(feature = "std")]
@@ -49,11 +49,14 @@ pub fn BASEPOINT_PRECOMP() -> VartimeEdwardsPrecomputation {
   VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT])
 }

+/// The Keccak-256 hash function.
 pub fn keccak256(data: impl AsRef<[u8]>) -> [u8; 32] {
   Keccak256::digest(data.as_ref()).into()
 }

 /// Hash the provided data to a scalar via keccak256(data) % l.
+///
+/// This function panics if it finds the Keccak-256 preimage for [0; 32].
 pub fn keccak256_to_scalar(data: impl AsRef<[u8]>) -> Scalar {
   let scalar = Scalar::from_bytes_mod_order(keccak256(data.as_ref()));
   // Monero will explicitly error in this case
@@ -84,39 +87,68 @@ impl Commitment {
     Commitment { mask: Scalar::ONE, amount: 0 }
   }

+  /// Create a new Commitment.
   pub fn new(mask: Scalar, amount: u64) -> Commitment {
     Commitment { mask, amount }
   }

-  /// Calculate a Pedersen commitment, as a point, from the transparent structure.
+  /// Calculate the Pedersen commitment, as a point, from this transparent structure.
   pub fn calculate(&self) -> EdwardsPoint {
     EdwardsPoint::vartime_double_scalar_mul_basepoint(&Scalar::from(self.amount), &H(), &self.mask)
   }
 }

-/// Decoy data, containing the actual member as well (at index `i`).
+/// Decoy data, as used for producing Monero's ring signatures.
 #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
 pub struct Decoys {
-  pub i: u8,
-  pub offsets: Vec<u64>,
-  pub ring: Vec<[EdwardsPoint; 2]>,
+  offsets: Vec<u64>,
+  signer_index: u8,
+  ring: Vec<[EdwardsPoint; 2]>,
 }

 #[allow(clippy::len_without_is_empty)]
 impl Decoys {
-  pub fn fee_weight(offsets: &[u64]) -> usize {
-    varint_len(offsets.len()) + offsets.iter().map(|offset| varint_len(*offset)).sum::<usize>()
+  /// Create a new instance of decoy data.
+  ///
+  /// `offsets` are the positions of each ring member within the Monero blockchain, offset from the
+  /// prior member's position (with the initial ring member offset from 0).
+  pub fn new(offsets: Vec<u64>, signer_index: u8, ring: Vec<[EdwardsPoint; 2]>) -> Option<Self> {
+    if (offsets.len() != ring.len()) || (usize::from(signer_index) >= ring.len()) {
+      None?;
+    }
+    Some(Decoys { offsets, signer_index, ring })
   }

+  /// The length of the ring.
   pub fn len(&self) -> usize {
     self.offsets.len()
   }

-  pub fn indexes(&self) -> Vec<u64> {
-    let mut res = vec![self.offsets[0]; self.len()];
+  /// The positions of the ring members within the Monero blockchain, as their offsets.
+  ///
+  /// The list is formatted as the position of the first ring member, then the offset from each
+  /// ring member to its prior.
+  pub fn offsets(&self) -> &[u64] {
+    &self.offsets
+  }
+
+  /// The positions of the ring members within the Monero blockchain.
+  pub fn positions(&self) -> Vec<u64> {
+    let mut res = Vec::with_capacity(self.len());
+    res.push(self.offsets[0]);
     for m in 1 .. res.len() {
-      res[m] = res[m - 1] + self.offsets[m];
+      res.push(res[m - 1] + self.offsets[m]);
     }
     res
   }
+
+  /// The index of the signer within the ring.
+  pub fn signer_index(&self) -> u8 {
+    self.signer_index
+  }
+
+  /// The ring.
+  pub fn ring(&self) -> &[[EdwardsPoint; 2]] {
+    &self.ring
+  }
 }
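With the `Decoys` fields now private, downstream code constructs them via `Decoys::new` and reads them through the accessors, mirroring the test and wallet updates later in this commit. A minimal sketch, assuming `Decoys` is in scope via whichever crate re-exports it; the ring contents and offsets here are placeholder data, not real chain outputs:

use curve25519_dalek::edwards::EdwardsPoint;

// Build decoy data for a ring whose members sit at consecutive on-chain
// positions (offsets of 1), with the signer at `signer_index`.
fn build_decoys(ring: Vec<[EdwardsPoint; 2]>, signer_index: u8) -> Option<Decoys> {
  // One offset per ring member: the first is an absolute position, the rest are deltas.
  let offsets = vec![1; ring.len()];
  let decoys = Decoys::new(offsets, signer_index, ring)?;
  // The former public fields are now read through accessors.
  debug_assert!(usize::from(decoys.signer_index()) < decoys.len());
  Some(decoys)
}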

View File

@@ -25,7 +25,6 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der
 subtle = { version = "^2.4", default-features = false }

 # Cryptographic dependencies
-sha3 = { version = "0.10", default-features = false }
 curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }

 # Multisig dependencies
@@ -50,7 +49,6 @@ std = [
   "zeroize/std",
   "subtle/std",

-  "sha3/std",
   "curve25519-dalek/precomputed-tables",

   "rand_chacha?/std",

View File

@@ -73,12 +73,12 @@ impl ClsagInput {
       Err(ClsagError::InvalidRing)?;
     }
     let n = u8::try_from(n).unwrap();
-    if decoys.i >= n {
-      Err(ClsagError::InvalidRingMember(decoys.i, n))?;
+    if decoys.signer_index() >= n {
+      Err(ClsagError::InvalidRingMember(decoys.signer_index(), n))?;
     }

     // Validate the commitment matches
-    if decoys.ring[usize::from(decoys.i)][1] != commitment.calculate() {
+    if decoys.ring()[usize::from(decoys.signer_index())][1] != commitment.calculate() {
       Err(ClsagError::InvalidCommitment)?;
     }
@@ -244,19 +244,19 @@
     A: EdwardsPoint,
     AH: EdwardsPoint,
   ) -> ClsagSignCore {
-    let r: usize = input.decoys.i.into();
+    let r: usize = input.decoys.signer_index().into();

     let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
     let mask_delta = input.commitment.mask - mask;

-    let H = hash_to_point(input.decoys.ring[r][0].compress().0);
+    let H = hash_to_point(input.decoys.ring()[r][0].compress().0);
     let D = H * mask_delta;
-    let mut s = Vec::with_capacity(input.decoys.ring.len());
-    for _ in 0 .. input.decoys.ring.len() {
+    let mut s = Vec::with_capacity(input.decoys.ring().len());
+    for _ in 0 .. input.decoys.ring().len() {
       s.push(Scalar::random(rng));
     }
     let ((D, c_p, c_c), c1) =
-      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));
+      core(input.decoys.ring(), I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));

     ClsagSignCore {
       incomplete_clsag: Clsag { D, s, c1 },
@@ -297,13 +297,15 @@
         nonce.deref() * ED25519_BASEPOINT_TABLE,
         nonce.deref() *
           hash_to_point(
-            inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0].compress().0,
+            inputs[i].2.decoys.ring()[usize::from(inputs[i].2.decoys.signer_index())][0]
+              .compress()
+              .0,
           ),
       );

      // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
      // member's commitment and our input commitment (which will only have a known discrete log
      // over G if the amounts cancel out)
-      incomplete_clsag.s[usize::from(inputs[i].2.decoys.i)] =
+      incomplete_clsag.s[usize::from(inputs[i].2.decoys.signer_index())] =
        nonce.deref() - ((key_challenge * inputs[i].0.deref()) + challenged_mask);
       let clsag = incomplete_clsag;
@@ -312,7 +314,7 @@
       nonce.zeroize();

       debug_assert!(clsag
-        .verify(&inputs[i].2.decoys.ring, &inputs[i].1, &pseudo_out, &msg)
+        .verify(inputs[i].2.decoys.ring(), &inputs[i].1, &pseudo_out, &msg)
         .is_ok());

       res.push((clsag, pseudo_out));

View File

@@ -35,10 +35,10 @@ impl ClsagInput {
       // Doesn't domain separate as this is considered part of the larger CLSAG proof

       // Ring index
-      transcript.append_message(b"real_spend", [self.decoys.i]);
+      transcript.append_message(b"real_spend", [self.decoys.signer_index()]);

       // Ring
-      for (i, pair) in self.decoys.ring.iter().enumerate() {
+      for (i, pair) in self.decoys.ring().iter().enumerate() {
         // Doesn't include global output indexes as CLSAG doesn't care and won't be affected by it
         // They're just a unreliable reference to this data which will be included in the message
         // if in use
@@ -249,10 +249,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     let mut clsag = interim.clsag.clone();
     // We produced shares as `r - p x`, yet the signature is `r - p x - c x`
     // Substract `c x` (saved as `c`) now
-    clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
+    clsag.s[usize::from(self.input().decoys.signer_index())] = sum.0 - interim.c;
     if clsag
       .verify(
-        &self.input().decoys.ring,
+        self.input().decoys.ring(),
         &self.image.expect("verifying a signature despite never processing any addendums").0,
         &interim.pseudo_out,
         self.msg.as_ref().unwrap(),

View File

@@ -64,11 +64,8 @@ fn clsag() {
       image,
       ClsagInput::new(
         Commitment::new(secrets.1, AMOUNT),
-        Decoys {
-          i: u8::try_from(real).unwrap(),
-          offsets: (1 ..= RING_LEN).collect(),
-          ring: ring.clone(),
-        },
+        Decoys::new((1 ..= RING_LEN).collect(), u8::try_from(real).unwrap(), ring.clone())
+          .unwrap(),
       )
       .unwrap(),
     )],
@@ -115,7 +112,7 @@ fn clsag_multisig() {
     Arc::new(RwLock::new(Some(ClsagDetails::new(
       ClsagInput::new(
         Commitment::new(randomness, AMOUNT),
-        Decoys { i: RING_INDEX, offsets: (1 ..= RING_LEN).collect(), ring: ring.clone() },
+        Decoys::new((1 ..= RING_LEN).collect(), RING_INDEX, ring.clone()).unwrap(),
       )
       .unwrap(),
       mask_sum,

View File

@@ -260,12 +260,15 @@ async fn select_decoys<R: RngCore + CryptoRng, RPC: RpcConnection>(
       // members
     }

-    res.push(Decoys {
-      // Binary searches for the real spend since we don't know where it sorted to
-      i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
-      offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
-      ring: ring.iter().map(|output| output.1).collect(),
-    });
+    res.push(
+      Decoys::new(
+        offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
+        // Binary searches for the real spend since we don't know where it sorted to
+        u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
+        ring.iter().map(|output| output.1).collect(),
+      )
+      .unwrap(),
+    );
   }

   Ok(res)

View File

@@ -22,6 +22,8 @@ use dalek_ff_group as dfg;
 #[cfg(feature = "multisig")]
 use frost::FrostError;

+use monero_io::varint_len;
+
 use crate::{
   Protocol, Commitment, hash,
   serialize::{
@@ -181,7 +183,7 @@ fn prepare_inputs(
     tx.prefix.inputs.push(Input::ToKey {
       amount: None,
-      key_offsets: decoys.offsets.clone(),
+      key_offsets: decoys.offsets().to_vec(),
       key_image: signable[i].1,
     });
   }
@@ -518,8 +520,13 @@ impl SignableTransaction {
     }

     // Caclculate weight of decoys
-    let decoy_weights =
-      inputs.iter().map(|(_, decoy)| Decoys::fee_weight(&decoy.offsets)).collect::<Vec<_>>();
+    let decoy_weights = inputs
+      .iter()
+      .map(|(_, decoys)| {
+        let offsets = decoys.offsets();
+        varint_len(offsets.len()) + offsets.iter().map(|offset| varint_len(*offset)).sum::<usize>()
+      })
+      .collect::<Vec<_>>();

     // Deterministically calculate tx weight and fee
     let (weight, fee) =

View File

@@ -108,11 +108,11 @@ impl SignableTransaction {
       transcript.append_message(b"input_shared_key", input.key_offset().to_bytes());

       // Ensure all signers are signing the same rings
-      transcript.append_message(b"real_spend", [decoys.i]);
-      for (i, ring_member) in decoys.ring.iter().enumerate() {
+      transcript.append_message(b"real_spend", [decoys.signer_index()]);
+      for (i, ring_member) in decoys.ring().iter().enumerate() {
         transcript
           .append_message(b"ring_member", [u8::try_from(i).expect("ring size exceeded 255")]);
-        transcript.append_message(b"ring_member_offset", decoys.offsets[i].to_le_bytes());
+        transcript.append_message(b"ring_member_offset", decoys.offsets()[i].to_le_bytes());
         transcript.append_message(b"ring_member_key", ring_member[0].compress().to_bytes());
         transcript.append_message(b"ring_member_commitment", ring_member[1].compress().to_bytes());
       }
@@ -356,7 +356,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
       tx.prefix.inputs.push(Input::ToKey {
         amount: None,
-        key_offsets: value.2.offsets.clone(),
+        key_offsets: value.2.offsets().to_vec(),
         key_image: value.0,
       });

View File

@@ -74,7 +74,7 @@ test!(
         .await
         .unwrap();

-      selected_fresh_decoy = decoys[0].indexes().contains(&output_tx1.global_index);
+      selected_fresh_decoy = decoys[0].positions().contains(&output_tx1.global_index);
       attempts -= 1;
     }
@@ -151,7 +151,7 @@ test!(
         .await
         .unwrap();

-      selected_fresh_decoy = decoys[0].indexes().contains(&output_tx1.global_index);
+      selected_fresh_decoy = decoys[0].positions().contains(&output_tx1.global_index);
       attempts -= 1;
     }