Files
serai/coins/monero/src/wallet/send/multisig.rs

410 lines
14 KiB
Rust
Raw Normal View History

2022-07-15 01:26:07 -04:00
use std::{
io::{self, Read},
2022-07-15 01:26:07 -04:00
sync::{Arc, RwLock},
collections::HashMap,
};
2022-04-30 04:32:19 -04:00
2022-05-06 07:33:08 -04:00
use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;
2022-04-30 04:32:19 -04:00
use group::ff::Field;
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
use dalek_ff_group as dfg;
2022-04-30 04:32:19 -04:00
use transcript::{Transcript, RecommendedTranscript};
use frost::{
2022-06-24 19:47:19 -04:00
curve::Ed25519,
FrostError, ThresholdKeys,
sign::{
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
2022-07-15 01:26:07 -04:00
},
};
2022-04-30 04:32:19 -04:00
use crate::{
2022-07-15 01:26:07 -04:00
random_scalar,
ringct::{
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
2022-07-15 01:26:07 -04:00
RctPrunable,
},
transaction::{Input, Transaction},
2022-04-30 04:32:19 -04:00
rpc::Rpc,
2022-07-15 01:26:07 -04:00
wallet::{TransactionError, SignableTransaction, Decoys, key_image_sort, uniqueness},
2022-04-30 04:32:19 -04:00
};
/// FROST signing machine to produce a signed transaction.
pub struct TransactionMachine {
  // The transaction being signed.
  signable: SignableTransaction,
  // Our participant index within the multisig.
  i: u16,
  // Transcript committing to the height, spend key, inputs, and payments.
  transcript: RecommendedTranscript,

  // The decoys selected for each input's ring.
  decoys: Vec<Decoys>,

  // Hashed key and scalar offset
  key_images: Vec<(EdwardsPoint, Scalar)>,
  // Per-input CLSAG details, shared with the ClsagMultisig algorithm machines and filled in
  // during the sign step.
  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  // One FROST algorithm machine per input, each producing a CLSAG.
  clsags: Vec<AlgorithmMachine<Ed25519, ClsagMultisig>>,
}
/// FROST sign machine for a Monero transaction, produced by preprocessing a TransactionMachine.
pub struct TransactionSignMachine {
  // The transaction being signed.
  signable: SignableTransaction,
  // Our participant index within the multisig.
  i: u16,
  // Transcript committing to the height, spend key, inputs, and payments.
  transcript: RecommendedTranscript,

  // The decoys selected for each input's ring.
  decoys: Vec<Decoys>,

  // Hashed key and scalar offset
  key_images: Vec<(EdwardsPoint, Scalar)>,
  // Per-input CLSAG details, shared with the CLSAG sign machines and filled in during sign.
  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  // One FROST sign machine per input's CLSAG.
  clsags: Vec<AlgorithmSignMachine<Ed25519, ClsagMultisig>>,

  // Our own preprocesses, retained so sign can transcript them and extract our key image shares.
  our_preprocess: Vec<Preprocess<Ed25519, ClsagAddendum>>,
}
2022-05-06 19:07:37 -04:00
/// Final FROST machine, collecting signature shares to complete the transaction's CLSAGs.
pub struct TransactionSignatureMachine {
  // The transaction whose prefix/inputs are set, awaiting its CLSAGs and pseudo-outs.
  tx: Transaction,
  // One FROST signature machine per input's CLSAG.
  clsags: Vec<AlgorithmSignatureMachine<Ed25519, ClsagMultisig>>,
}
impl SignableTransaction {
  /// Create a FROST signing machine out of this signable transaction.
  /// The height is the Monero blockchain height to synchronize around.
  ///
  /// The transcript is extended with the height, group key, inputs, and payments, and then used
  /// to deterministically seed the RNGs for decoy selection (and, later, transaction keys and
  /// pseudo-out masks), so all participants derive identical values.
  pub async fn multisig(
    self,
    rpc: &Rpc,
    keys: ThresholdKeys<Ed25519>,
    mut transcript: RecommendedTranscript,
    height: usize,
  ) -> Result<TransactionMachine, TransactionError> {
    // One shared (initially empty) ClsagDetails slot per input
    let mut inputs = vec![];
    for _ in 0 .. self.inputs.len() {
      // Doesn't resize as that will use a single Rc for the entire Vec
      inputs.push(Arc::new(RwLock::new(None)));
    }
    let mut clsags = vec![];

    // Create a RNG out of the input shared keys, which either requires the view key or being every
    // sender, and the payments (address and amount), which a passive adversary may be able to know
    // depending on how these transactions are coordinated
    // Being every sender would already let you note rings which happen to use your transactions
    // multiple times, already breaking privacy there

    transcript.domain_separate(b"monero_transaction");
    // Include the height we're using for our data
    // The data itself will be included, making this unnecessary, yet a lot of this is technically
    // unnecessary. Anything which further increases security at almost no cost should be followed
    transcript.append_message(b"height", u64::try_from(height).unwrap().to_le_bytes());
    // Also include the spend_key as below only the key offset is included, so this transcripts the
    // sum product
    // Useful as transcripting the sum product effectively transcripts the key image, further
    // guaranteeing the one time properties noted below
    transcript.append_message(b"spend_key", keys.group_key().0.compress().to_bytes());
    for input in &self.inputs {
      // These outputs can only be spent once. Therefore, it forces all RNGs derived from this
      // transcript (such as the one used to create one time keys) to be unique
      transcript.append_message(b"input_hash", input.output.absolute.tx);
      transcript.append_message(b"input_output_index", [input.output.absolute.o]);
      // Not including this, with a doxxed list of payments, would allow brute forcing the inputs
      // to determine RNG seeds and therefore the true spends
      transcript.append_message(b"input_shared_key", input.key_offset().to_bytes());
    }
    for payment in &self.payments {
      transcript.append_message(b"payment_address", payment.0.to_string().as_bytes());
      transcript.append_message(b"payment_amount", payment.1.to_le_bytes());
    }

    let mut key_images = vec![];
    for (i, input) in self.inputs.iter().enumerate() {
      // Check this is the right set of keys
      let offset = keys.offset(dfg::Scalar(input.key_offset()));
      if offset.group_key().0 != input.key() {
        Err(TransactionError::WrongPrivateKey)?;
      }

      let clsag = ClsagMultisig::new(transcript.clone(), input.key(), inputs[i].clone());
      // Record the hashed key and the total scalar offset for this input, used later to
      // reconstruct key images from the participants' shares
      key_images.push((
        clsag.H,
        keys.current_offset().unwrap_or_else(dfg::Scalar::zero).0 + self.inputs[i].key_offset(),
      ));
      clsags.push(AlgorithmMachine::new(clsag, offset).map_err(TransactionError::FrostError)?);
    }

    // Select decoys
    // Ideally, this would be done post entropy, instead of now, yet doing so would require sign
    // to be async which isn't preferable. This should be suitably competent though
    // While this inability means we can immediately create the input, moving it out of the
    // Arc RwLock, keeping it within an Arc RwLock keeps our options flexible
    let decoys = Decoys::select(
      // Using a seeded RNG with a specific height, committed to above, should make these decoys
      // committed to. They'll also be committed to later via the TX message as a whole
      &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")),
      rpc,
      self.protocol.ring_len(),
      height,
      &self.inputs,
    )
    .await
    .map_err(TransactionError::RpcError)?;

    Ok(TransactionMachine {
      signable: self,
      i: keys.params().i(),
      transcript,

      decoys,

      key_images,

      inputs,
      clsags,
    })
  }
}
impl PreprocessMachine for TransactionMachine {
type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
2022-04-30 04:32:19 -04:00
type Signature = Transaction;
type SignMachine = TransactionSignMachine;
2022-04-30 04:32:19 -04:00
fn preprocess<R: RngCore + CryptoRng>(
mut self,
2022-07-15 01:26:07 -04:00
rng: &mut R,
) -> (TransactionSignMachine, Self::Preprocess) {
2022-04-30 04:32:19 -04:00
// Iterate over each CLSAG calling preprocess
let mut preprocesses = Vec::with_capacity(self.clsags.len());
2022-07-15 01:26:07 -04:00
let clsags = self
.clsags
.drain(..)
.map(|clsag| {
let (clsag, preprocess) = clsag.preprocess(rng);
preprocesses.push(preprocess);
2022-07-15 01:26:07 -04:00
clsag
})
.collect();
let our_preprocess = preprocesses.clone();
2022-04-30 04:32:19 -04:00
// We could add further entropy here, and previous versions of this library did so
// As of right now, the multisig's key, the inputs being spent, and the FROST data itself
// will be used for RNG seeds. In order to recreate these RNG seeds, breaking privacy,
// counterparties must have knowledge of the multisig, either the view key or access to the
// coordination layer, and then access to the actual FROST signing process
// If the commitments are sent in plain text, then entropy here also would be, making it not
// increase privacy. If they're not sent in plain text, or are otherwise inaccessible, they
// already offer sufficient entropy. That's why further entropy is not included
2022-04-30 04:32:19 -04:00
(
TransactionSignMachine {
signable: self.signable,
i: self.i,
transcript: self.transcript,
decoys: self.decoys,
key_images: self.key_images,
inputs: self.inputs,
clsags,
our_preprocess,
},
preprocesses,
)
2022-04-30 04:32:19 -04:00
}
}
impl SignMachine<Transaction> for TransactionSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Ed25519>;
  type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
  type SignatureShare = Vec<SignatureShare<Ed25519>>;
  type SignatureMachine = TransactionSignatureMachine;

  /// Unsupported: these preprocesses are bound to one specific transaction, so caching them for
  /// later reuse is meaningless. Always panics.
  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "Monero transactions don't support caching their preprocesses due to {}",
      "being already bound to a specific transaction"
    );
  }

  /// Unsupported for the same reason as `cache`. Always panics.
  fn from_cache(_: (), _: ThresholdKeys<Ed25519>, _: CachedPreprocess) -> Result<Self, FrostError> {
    unimplemented!(
      "Monero transactions don't support caching their preprocesses due to {}",
      "being already bound to a specific transaction"
    );
  }

  /// Read one preprocess per CLSAG (one per input) from the reader, in input order.
  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.clsags.iter().map(|clsag| clsag.read_preprocess(reader)).collect()
  }

  /// Consume everyone's preprocesses to build the actual transaction and produce our signature
  /// shares (one per input's CLSAG). `msg` must be empty: this machine derives its own message
  /// (the transaction's signature hash).
  fn sign(
    mut self,
    mut commitments: HashMap<u16, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      Err(FrostError::InternalError(
        "message was passed to the TransactionMachine when it generates its own",
      ))?;
    }

    // Find out who's included
    // This may not be a valid set of signers yet the algorithm machine will error if it's not
    commitments.remove(&self.i); // Remove, if it was included for some reason
    let mut included = commitments.keys().cloned().collect::<Vec<_>>();
    included.push(self.i);
    included.sort_unstable();

    // Convert the unified commitments to a Vec of the individual commitments
    let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
    let mut commitments = (0 .. self.clsags.len())
      .map(|c| {
        included
          .iter()
          .map(|l| {
            // Add all commitments to the transcript for their entropy
            // While each CLSAG will do this as they need to for security, they have their own
            // transcripts cloned from this TX's initial premise's transcript. For our TX
            // transcript to have the CLSAG data for entropy, it'll have to be added ourselves here
            self.transcript.append_message(b"participant", (*l).to_be_bytes());

            // Our own preprocess was kept locally; everyone else's comes from the map
            let preprocess = if *l == self.i {
              self.our_preprocess[c].clone()
            } else {
              commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone()
            };

            {
              let mut buf = vec![];
              preprocess.write(&mut buf).unwrap();
              self.transcript.append_message(b"preprocess", buf);
            }

            // While here, calculate the key image
            // Clsag will parse/calculate/validate this as needed, yet doing so here as well
            // provides the easiest API overall, as this is where the TX is (which needs the key
            // images in its message), along with where the outputs are determined (where our
            // outputs may need these in order to guarantee uniqueness)
            add_key_image_share(
              &mut images[c],
              self.key_images[c].0,
              self.key_images[c].1,
              &included,
              *l,
              preprocess.addendum.key_image.0,
            );

            Ok((*l, preprocess))
          })
          .collect::<Result<HashMap<_, _>, _>>()
      })
      .collect::<Result<Vec<_>, _>>()?;

    // Remove our preprocess which shouldn't be here. It was just the easiest way to implement the
    // above
    for map in commitments.iter_mut() {
      map.remove(&self.i);
    }

    // Create the actual transaction
    let (mut tx, output_masks) = {
      let mut sorted_images = images.clone();
      sorted_images.sort_by(key_image_sort);

      self.signable.prepare_transaction(
        // RNG is deterministically seeded from the transcript so all signers build the same TX
        &mut ChaCha20Rng::from_seed(self.transcript.rng_seed(b"transaction_keys_bulletproofs")),
        uniqueness(
          &sorted_images
            .iter()
            .map(|image| Input::ToKey { amount: 0, key_offsets: vec![], key_image: *image })
            .collect::<Vec<_>>(),
        ),
      )
    };

    // Sort the inputs, as expected
    // Bundle every per-input value together so one sort keeps them all aligned
    let mut sorted = Vec::with_capacity(self.clsags.len());
    while !self.clsags.is_empty() {
      sorted.push((
        images.swap_remove(0),
        self.signable.inputs.swap_remove(0),
        self.decoys.swap_remove(0),
        self.inputs.swap_remove(0),
        self.clsags.swap_remove(0),
        commitments.swap_remove(0),
      ));
    }
    sorted.sort_by(|x, y| key_image_sort(&x.0, &y.0));

    // Pseudo-out masks must sum to the output masks; the last input's mask is chosen to balance
    let mut rng = ChaCha20Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks"));
    let mut sum_pseudo_outs = Scalar::zero();
    while !sorted.is_empty() {
      let value = sorted.remove(0);

      let mut mask = random_scalar(&mut rng);
      if sorted.is_empty() {
        mask = output_masks - sum_pseudo_outs;
      } else {
        sum_pseudo_outs += mask;
      }

      tx.prefix.inputs.push(Input::ToKey {
        amount: 0,
        key_offsets: value.2.offsets.clone(),
        key_image: value.0,
      });

      // Publish this input's ClsagDetails into the slot shared with its CLSAG machine
      *value.3.write().unwrap() = Some(ClsagDetails::new(
        ClsagInput::new(value.1.commitment().clone(), value.2).map_err(|_| {
          panic!("Signing an input which isn't present in the ring we created for it")
        })?,
        mask,
      ));

      self.clsags.push(value.4);
      commitments.push(value.5);
    }

    // The actual message signed is the transaction's signature hash
    let msg = tx.signature_hash();

    // Iterate over each CLSAG calling sign
    let mut shares = Vec::with_capacity(self.clsags.len());
    let clsags = self
      .clsags
      .drain(..)
      .map(|clsag| {
        let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?;
        shares.push(share);
        Ok(clsag)
      })
      .collect::<Result<_, _>>()?;

    Ok((TransactionSignatureMachine { tx, clsags }, shares))
  }
}
2022-04-30 04:32:19 -04:00
impl SignatureMachine<Transaction> for TransactionSignatureMachine {
  type SignatureShare = Vec<SignatureShare<Ed25519>>;

  /// Read one signature share per CLSAG (one per input) from the reader, in input order.
  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    let mut res = Vec::with_capacity(self.clsags.len());
    for clsag in &self.clsags {
      res.push(clsag.read_share(reader)?);
    }
    Ok(res)
  }

  /// Combine everyone's shares into finished CLSAGs and pseudo-outs, yielding the completed
  /// transaction.
  fn complete(
    mut self,
    shares: HashMap<u16, Self::SignatureShare>,
  ) -> Result<Transaction, FrostError> {
    let mut tx = self.tx;
    match tx.rct_signatures.prunable {
      RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
      RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
        for (idx, machine) in self.clsags.drain(..).enumerate() {
          // Each participant submitted a Vec of shares, one per input; pick out this input's
          let these_shares =
            shares.iter().map(|(l, all)| (*l, all[idx].clone())).collect::<HashMap<_, _>>();
          let (clsag, pseudo_out) = machine.complete(these_shares)?;
          clsags.push(clsag);
          pseudo_outs.push(pseudo_out);
        }
      }
    }
    Ok(tx)
  }
}