Files
serai/processor/src/wallet.rs

355 lines
12 KiB
Rust
Raw Normal View History

Utilize zeroize (#76) * Apply Zeroize to nonces used in Bulletproofs Also makes bit decomposition constant time for a given amount of outputs. * Fix nonce reuse for single-signer CLSAG * Attach Zeroize to most structures in Monero, and ZOnDrop to anything with private data * Zeroize private keys and nonces * Merge prepare_outputs and prepare_transactions * Ensure CLSAG is constant time * Pass by borrow where needed, bug fixes The past few commitments have been one in-progress chunk which I've broken up as best read. * Add Zeroize to FROST structs Still needs to zeroize internally, yet next step. Not quite as aggressive as Monero, partially due to the limitations of HashMaps, partially due to less concern about metadata, yet does still delete a few smaller items of metadata (group key, context string...). * Remove Zeroize from most Monero multisig structs These structs largely didn't have private data, just fields with private data, yet those fields implemented ZeroizeOnDrop making them already covered. While there is still traces of the transaction left in RAM, fully purging that was never the intent. * Use Zeroize within dleq bitvec doesn't offer Zeroize, so a manual zeroing has been implemented. * Use Zeroize for random_nonce It isn't perfect, due to the inability to zeroize the digest, and due to kp256 requiring a few transformations. It does the best it can though. Does move the per-curve random_nonce to a provided one, which is allowed as of https://github.com/cfrg/draft-irtf-cfrg-frost/pull/231. * Use Zeroize on FROST keygen/signing * Zeroize constant time multiexp. * Correct when FROST keygen zeroizes * Move the FROST keys Arc into FrostKeys Reduces amount of instances in memory. * Manually implement Debug for FrostCore to not leak the secret share * Misc bug fixes * clippy + multiexp test bug fixes * Correct FROST key gen share summation It leaked our own share for ourself. * Fix cross-group DLEq tests
2022-08-03 03:25:18 -05:00
use std::collections::HashMap;
use rand_core::OsRng;
use group::GroupEncoding;
2022-06-05 15:10:50 -04:00
use transcript::{Transcript, RecommendedTranscript};
2022-07-15 01:26:07 -04:00
use frost::{
curve::Curve,
FrostKeys,
sign::{PreprocessMachine, SignMachine, SignatureMachine},
};
2022-07-15 01:26:07 -04:00
use crate::{
coin::{CoinError, Output, Coin},
SignError, Network,
};
/// FROST keys paired with the height at which they were created.
///
/// Used to schedule key activation within a `Wallet` via `Wallet::add_keys`.
pub struct WalletKeys<C: Curve> {
  keys: FrostKeys<C>,
  // Height of the block these keys were created at, used as the starting point for scanning
  creation_height: usize,
}
impl<C: Curve> WalletKeys<C> {
  /// Wrap a set of FROST keys with their creation height.
  pub fn new(keys: FrostKeys<C>, creation_height: usize) -> WalletKeys<C> {
    WalletKeys { keys, creation_height }
  }

  // Bind these keys to a specific chain by applying an additive offset derived from a
  // transcript of the chain, the curve ID, and the group key itself.
  // Including the group key, not just C::ID, yields offsets distinct per key set instead of
  // static per chain. Under static offsets, one chain's potential group key could be found by
  // subtracting X, and another's by adding Y. While that shouldn't matter here, as this isn't
  // a private system, binding to the group key also means influencing key generation to
  // achieve key cancellation would additionally require breaking the hash function — though
  // that degree of influence means key generation is broken already.
  fn bind(&self, chain: &[u8]) -> FrostKeys<C> {
    const DST: &[u8] = b"Serai Processor Wallet Chain Bind";

    // Commit to the chain, the curve, and the group key, in that order
    let mut t = RecommendedTranscript::new(DST);
    t.append_message(b"chain", chain);
    t.append_message(b"curve", C::ID);
    t.append_message(b"group_key", self.keys.group_key().to_bytes().as_ref());

    // Derive the offset from the transcript's challenge and apply it
    let offset = C::hash_to_F(DST, &t.challenge(b"offset"));
    self.keys.offset(offset)
  }
}
2022-06-05 06:00:21 -04:00
/// Persistence layer for a single coin's scanning state and observed outputs.
pub trait CoinDb {
  // Set a height as scanned to, so future polls resume from it
  fn scanned_to_height(&mut self, height: usize);
  // Acknowledge a given coin height for a canonical height
  fn acknowledge_height(&mut self, canonical: usize, height: usize);
  // Adds an output to the DB. Returns false if the output was already added
  fn add_output<O: Output>(&mut self, output: &O) -> bool;
  // Height this coin has been scanned to
  fn scanned_height(&self) -> usize;
  // Acknowledged height for a given canonical height
  fn acknowledged_height(&self, canonical: usize) -> usize;
}
/// An in-memory `CoinDb` implementation, with no persistence across restarts.
pub struct MemCoinDb {
  // Height this coin has been scanned to
  scanned_height: usize,
  // Acknowledged height for a given canonical height
  acknowledged_heights: HashMap<usize, usize>,
  // Serialized outputs, keyed by their IDs
  outputs: HashMap<Vec<u8>, Vec<u8>>,
}
2022-06-05 06:00:21 -04:00
impl MemCoinDb {
pub fn new() -> MemCoinDb {
2022-07-15 01:26:07 -04:00
MemCoinDb { scanned_height: 0, acknowledged_heights: HashMap::new(), outputs: HashMap::new() }
2022-06-05 06:00:21 -04:00
}
}
impl CoinDb for MemCoinDb {
  fn scanned_to_height(&mut self, height: usize) {
    self.scanned_height = height;
  }

  fn acknowledge_height(&mut self, canonical: usize, height: usize) {
    // Each canonical height should only be acknowledged once
    debug_assert!(!self.acknowledged_heights.contains_key(&canonical));
    self.acknowledged_heights.insert(canonical, height);
  }

  fn add_output<O: Output>(&mut self, output: &O) -> bool {
    // This would be insecure as we're indexing by ID and this will replace the output as a whole
    // Multiple outputs may have the same ID in edge cases such as Monero, where outputs are ID'd
    // by output key, not by hash + index
    // self.outputs.insert(output.id(), output).is_some()
    use std::collections::hash_map::Entry;
    let id = output.id().as_ref().to_vec();
    // Entry avoids a second hash/lookup versus contains_key followed by insert
    match self.outputs.entry(id) {
      Entry::Occupied(_) => false,
      Entry::Vacant(entry) => {
        entry.insert(output.serialize());
        true
      }
    }
  }

  fn scanned_height(&self) -> usize {
    self.scanned_height
  }

  fn acknowledged_height(&self, canonical: usize) -> usize {
    // Panics if this canonical height was never acknowledged
    self.acknowledged_heights[&canonical]
  }
}
fn select_inputs<C: Coin>(inputs: &mut Vec<C::Output>) -> (Vec<C::Output>, u64) {
// Sort to ensure determinism. Inefficient, yet produces the most legible code to be optimized
// later
2022-09-04 21:23:38 -04:00
inputs.sort_by_key(|a| a.amount());
// Select the maximum amount of outputs possible
let res = inputs.split_off(inputs.len() - C::MAX_INPUTS.min(inputs.len()));
// Calculate their sum value, minus the fee needed to spend them
let sum = res.iter().map(|input| input.amount()).sum();
// sum -= C::MAX_FEE; // TODO
(res, sum)
}
fn select_outputs<C: Coin>(
payments: &mut Vec<(C::Address, u64)>,
2022-07-15 01:26:07 -04:00
value: &mut u64,
) -> Vec<(C::Address, u64)> {
// Prioritize large payments which will most efficiently use large inputs
payments.sort_by(|a, b| a.1.cmp(&b.1));
// Grab the payments this will successfully fund
let mut outputs = vec![];
let mut p = payments.len();
while p != 0 {
p -= 1;
if *value >= payments[p].1 {
*value -= payments[p].1;
// Swap remove will either pop the tail or insert an element that wouldn't fit, making it
// always safe to move past
outputs.push(payments.swap_remove(p));
}
// Doesn't break in this else case as a smaller payment may still fit
}
outputs
}
// Optimizes on the expectation selected/inputs are sorted from lowest value to highest
fn refine_inputs<C: Coin>(
selected: &mut Vec<C::Output>,
inputs: &mut Vec<C::Output>,
2022-07-15 01:26:07 -04:00
mut remaining: u64,
) {
// Drop unused inputs
let mut s = 0;
while remaining > selected[s].amount() {
remaining -= selected[s].amount();
s += 1;
}
// Add them back to the inputs pool
inputs.extend(selected.drain(.. s));
// Replace large inputs with smaller ones
for s in (0 .. selected.len()).rev() {
for input in inputs.iter_mut() {
// Doesn't break due to inputs no longer being sorted
// This could be made faster if we prioritized small input usage over transaction size/fees
// TODO: Consider. This would implicitly consolidate inputs which would be advantageous
if selected[s].amount() < input.amount() {
continue;
}
// If we can successfully replace this input, do so
let diff = selected[s].amount() - input.amount();
if remaining > diff {
remaining -= diff;
let old = selected[s].clone();
selected[s] = input.clone();
*input = old;
}
}
}
}
// Select inputs from `inputs` and the payments they can fund from `outputs`.
// Returns the refined input selection and the funded payments, both removed from their
// source vectors. Returns empty vectors if nothing can be funded.
fn select_inputs_outputs<C: Coin>(
  inputs: &mut Vec<C::Output>,
  outputs: &mut Vec<(C::Address, u64)>,
) -> (Vec<C::Output>, Vec<(C::Address, u64)>) {
  if inputs.is_empty() {
    return (vec![], vec![]);
  }

  let (mut selected, mut value) = select_inputs::<C>(inputs);

  let outputs = select_outputs::<C>(outputs, &mut value);
  if outputs.is_empty() {
    // Nothing could be funded; return the selected inputs to the pool
    inputs.extend(selected);
    return (vec![], vec![]);
  }

  refine_inputs::<C>(&mut selected, inputs, value);
  (selected, outputs)
}
2022-06-05 06:00:21 -04:00
pub struct Wallet<D: CoinDb, C: Coin> {
db: D,
coin: C,
Utilize zeroize (#76) * Apply Zeroize to nonces used in Bulletproofs Also makes bit decomposition constant time for a given amount of outputs. * Fix nonce reuse for single-signer CLSAG * Attach Zeroize to most structures in Monero, and ZOnDrop to anything with private data * Zeroize private keys and nonces * Merge prepare_outputs and prepare_transactions * Ensure CLSAG is constant time * Pass by borrow where needed, bug fixes The past few commitments have been one in-progress chunk which I've broken up as best read. * Add Zeroize to FROST structs Still needs to zeroize internally, yet next step. Not quite as aggressive as Monero, partially due to the limitations of HashMaps, partially due to less concern about metadata, yet does still delete a few smaller items of metadata (group key, context string...). * Remove Zeroize from most Monero multisig structs These structs largely didn't have private data, just fields with private data, yet those fields implemented ZeroizeOnDrop making them already covered. While there is still traces of the transaction left in RAM, fully purging that was never the intent. * Use Zeroize within dleq bitvec doesn't offer Zeroize, so a manual zeroing has been implemented. * Use Zeroize for random_nonce It isn't perfect, due to the inability to zeroize the digest, and due to kp256 requiring a few transformations. It does the best it can though. Does move the per-curve random_nonce to a provided one, which is allowed as of https://github.com/cfrg/draft-irtf-cfrg-frost/pull/231. * Use Zeroize on FROST keygen/signing * Zeroize constant time multiexp. * Correct when FROST keygen zeroizes * Move the FROST keys Arc into FrostKeys Reduces amount of instances in memory. * Manually implement Debug for FrostCore to not leak the secret share * Misc bug fixes * clippy + multiexp test bug fixes * Correct FROST key gen share summation It leaked our own share for ourself. * Fix cross-group DLEq tests
2022-08-03 03:25:18 -05:00
keys: Vec<(FrostKeys<C::Curve>, Vec<C::Output>)>,
2022-07-15 01:26:07 -04:00
pending: Vec<(usize, FrostKeys<C::Curve>)>,
}
2022-06-05 06:00:21 -04:00
impl<D: CoinDb, C: Coin> Wallet<D, C> {
pub fn new(db: D, coin: C) -> Wallet<D, C> {
2022-07-15 01:26:07 -04:00
Wallet { db, coin, keys: vec![], pending: vec![] }
}
2022-07-15 01:26:07 -04:00
pub fn scanned_height(&self) -> usize {
self.db.scanned_height()
}
pub fn acknowledge_height(&mut self, canonical: usize, height: usize) {
2022-06-05 06:00:21 -04:00
self.db.acknowledge_height(canonical, height);
if height > self.db.scanned_height() {
self.db.scanned_to_height(height);
}
}
pub fn acknowledged_height(&self, canonical: usize) -> usize {
2022-06-05 06:00:21 -04:00
self.db.acknowledged_height(canonical)
}
pub fn add_keys(&mut self, keys: &WalletKeys<C::Curve>) {
// Doesn't use +1 as this is height, not block index, and poll moves by block index
self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::ID)));
}
pub fn address(&self) -> C::Address {
self.coin.address(self.keys[self.keys.len() - 1].0.group_key())
}
pub async fn poll(&mut self) -> Result<(), CoinError> {
if self.coin.get_height().await? < C::CONFIRMATIONS {
return Ok(());
}
let confirmed_block = self.coin.get_height().await? - C::CONFIRMATIONS;
for b in self.scanned_height() ..= confirmed_block {
// If any keys activated at this height, shift them over
{
let mut k = 0;
while k < self.pending.len() {
2022-06-05 06:00:21 -04:00
// TODO
//if b < self.pending[k].0 {
//} else if b == self.pending[k].0 {
if b <= self.pending[k].0 {
Utilize zeroize (#76) * Apply Zeroize to nonces used in Bulletproofs Also makes bit decomposition constant time for a given amount of outputs. * Fix nonce reuse for single-signer CLSAG * Attach Zeroize to most structures in Monero, and ZOnDrop to anything with private data * Zeroize private keys and nonces * Merge prepare_outputs and prepare_transactions * Ensure CLSAG is constant time * Pass by borrow where needed, bug fixes The past few commitments have been one in-progress chunk which I've broken up as best read. * Add Zeroize to FROST structs Still needs to zeroize internally, yet next step. Not quite as aggressive as Monero, partially due to the limitations of HashMaps, partially due to less concern about metadata, yet does still delete a few smaller items of metadata (group key, context string...). * Remove Zeroize from most Monero multisig structs These structs largely didn't have private data, just fields with private data, yet those fields implemented ZeroizeOnDrop making them already covered. While there is still traces of the transaction left in RAM, fully purging that was never the intent. * Use Zeroize within dleq bitvec doesn't offer Zeroize, so a manual zeroing has been implemented. * Use Zeroize for random_nonce It isn't perfect, due to the inability to zeroize the digest, and due to kp256 requiring a few transformations. It does the best it can though. Does move the per-curve random_nonce to a provided one, which is allowed as of https://github.com/cfrg/draft-irtf-cfrg-frost/pull/231. * Use Zeroize on FROST keygen/signing * Zeroize constant time multiexp. * Correct when FROST keygen zeroizes * Move the FROST keys Arc into FrostKeys Reduces amount of instances in memory. * Manually implement Debug for FrostCore to not leak the secret share * Misc bug fixes * clippy + multiexp test bug fixes * Correct FROST key gen share summation It leaked our own share for ourself. * Fix cross-group DLEq tests
2022-08-03 03:25:18 -05:00
self.keys.push((self.pending.swap_remove(k).1, vec![]));
} else {
k += 1;
}
}
}
let block = self.coin.get_block(b).await?;
for (keys, outputs) in self.keys.iter_mut() {
outputs.extend(
2022-07-15 01:26:07 -04:00
self
.coin
.get_outputs(&block, keys.group_key())
.await?
2022-07-15 01:26:07 -04:00
.iter()
.cloned()
.filter(|output| self.db.add_output(output)),
);
}
// Blocks are zero-indexed while heights aren't
self.db.scanned_to_height(b + 1);
}
Ok(())
}
2022-06-05 06:00:21 -04:00
// This should be called whenever new outputs are received, meaning there was a new block
// If these outputs were received and sent to Substrate, it should be called after they're
// included in a block and we have results to act on
// If these outputs weren't sent to Substrate (change), it should be called immediately
// with all payments still queued from the last call
pub async fn prepare_sends(
&mut self,
canonical: usize,
payments: Vec<(C::Address, u64)>,
2022-07-15 01:26:07 -04:00
fee: C::Fee,
2022-06-05 06:00:21 -04:00
) -> Result<(Vec<(C::Address, u64)>, Vec<C::SignableTransaction>), CoinError> {
if payments.is_empty() {
2022-06-05 06:00:21 -04:00
return Ok((vec![], vec![]));
}
let acknowledged_height = self.acknowledged_height(canonical);
2022-06-05 06:00:21 -04:00
// TODO: Log schedule outputs when MAX_OUTPUTS is lower than payments.len()
// Payments is the first set of TXs in the schedule
// As each payment re-appears, let mut payments = schedule[payment] where the only input is
// the source payment
// let (mut payments, schedule) = schedule(payments);
let mut payments = payments;
let mut txs = vec![];
for (keys, outputs) in self.keys.iter_mut() {
while !outputs.is_empty() {
let (inputs, outputs) = select_inputs_outputs::<C>(outputs, &mut payments);
// If we can no longer process any payments, move to the next set of keys
if outputs.is_empty() {
debug_assert_eq!(inputs.len(), 0);
break;
}
// Create the transcript for this transaction
let mut transcript = RecommendedTranscript::new(b"Serai Processor Wallet Send");
2022-07-15 01:26:07 -04:00
transcript
.append_message(b"canonical_height", &u64::try_from(canonical).unwrap().to_le_bytes());
2022-06-05 15:10:50 -04:00
transcript.append_message(
b"acknowledged_height",
2022-07-15 01:26:07 -04:00
&u64::try_from(acknowledged_height).unwrap().to_le_bytes(),
2022-06-05 15:10:50 -04:00
);
2022-07-15 01:26:07 -04:00
transcript.append_message(b"index", &u64::try_from(txs.len()).unwrap().to_le_bytes());
2022-07-15 01:26:07 -04:00
let tx = self
.coin
.prepare_send(keys.clone(), transcript, acknowledged_height, inputs, &outputs, fee)
.await?;
// self.db.save_tx(tx) // TODO
txs.push(tx);
}
}
2022-06-05 06:00:21 -04:00
Ok((payments, txs))
}
pub async fn attempt_send<N: Network>(
&mut self,
network: &mut N,
prepared: C::SignableTransaction,
2022-07-15 01:26:07 -04:00
included: Vec<u16>,
) -> Result<(Vec<u8>, Vec<<C::Output as Output>::Id>), SignError> {
2022-07-15 01:26:07 -04:00
let attempt =
self.coin.attempt_send(prepared, &included).await.map_err(SignError::CoinError)?;
let (attempt, commitments) = attempt.preprocess(&mut OsRng);
let commitments = network.round(commitments).await.map_err(SignError::NetworkError)?;
let (attempt, share) = attempt.sign(commitments, b"").map_err(SignError::FrostError)?;
let shares = network.round(share).await.map_err(SignError::NetworkError)?;
let tx = attempt.complete(shares).map_err(SignError::FrostError)?;
self.coin.publish_transaction(&tx).await.map_err(SignError::CoinError)
}
}