2023-03-11 10:51:40 -05:00
|
|
|
use core::ops::Deref;
|
2024-07-03 13:35:19 -04:00
|
|
|
use std_shims::{vec::Vec, string::ToString, collections::HashMap};
|
2023-01-07 04:44:23 -05:00
|
|
|
|
2024-06-27 07:36:45 -04:00
|
|
|
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
|
Utilize zeroize (#76)
* Apply Zeroize to nonces used in Bulletproofs
Also makes bit decomposition constant time for a given amount of
outputs.
* Fix nonce reuse for single-signer CLSAG
* Attach Zeroize to most structures in Monero, and ZOnDrop to anything with private data
* Zeroize private keys and nonces
* Merge prepare_outputs and prepare_transactions
* Ensure CLSAG is constant time
* Pass by borrow where needed, bug fixes
The past few commitments have been one in-progress chunk which I've
broken up as best read.
* Add Zeroize to FROST structs
Still needs to zeroize internally, yet next step. Not quite as
aggressive as Monero, partially due to the limitations of HashMaps,
partially due to less concern about metadata, yet does still delete a
few smaller items of metadata (group key, context string...).
* Remove Zeroize from most Monero multisig structs
These structs largely didn't have private data, just fields with private
data, yet those fields implemented ZeroizeOnDrop making them already
covered. While there is still traces of the transaction left in RAM,
fully purging that was never the intent.
* Use Zeroize within dleq
bitvec doesn't offer Zeroize, so a manual zeroing has been implemented.
* Use Zeroize for random_nonce
It isn't perfect, due to the inability to zeroize the digest, and due to
kp256 requiring a few transformations. It does the best it can though.
Does move the per-curve random_nonce to a provided one, which is allowed
as of https://github.com/cfrg/draft-irtf-cfrg-frost/pull/231.
* Use Zeroize on FROST keygen/signing
* Zeroize constant time multiexp.
* Correct when FROST keygen zeroizes
* Move the FROST keys Arc into FrostKeys
Reduces amount of instances in memory.
* Manually implement Debug for FrostCore to not leak the secret share
* Misc bug fixes
* clippy + multiexp test bug fixes
* Correct FROST key gen share summation
It leaked our own share for ourself.
* Fix cross-group DLEq tests
2022-08-03 03:25:18 -05:00
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, edwards::CompressedEdwardsY};
|
2022-05-21 15:33:35 -04:00
|
|
|
|
2024-06-16 19:59:25 -04:00
|
|
|
use monero_rpc::{RpcError, Rpc};
|
2024-06-16 18:40:15 -04:00
|
|
|
use monero_serai::{
|
2024-06-16 12:26:14 -04:00
|
|
|
io::*,
|
|
|
|
|
primitives::Commitment,
|
2022-12-24 15:17:49 -05:00
|
|
|
transaction::{Input, Timelock, Transaction},
|
2022-08-22 12:15:14 -04:00
|
|
|
block::Block,
|
2024-06-16 18:40:15 -04:00
|
|
|
};
|
2024-07-03 13:35:19 -04:00
|
|
|
use crate::{
|
|
|
|
|
address::SubaddressIndex, ViewPair, GuaranteedViewPair, output::*, PaymentId, Extra,
|
|
|
|
|
SharedKeyDerivations,
|
|
|
|
|
};
|
2022-08-22 07:22:54 -04:00
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
/// A collection of potentially additionally timelocked outputs.
//
// Newtype over the scanned outputs. Deriving `Zeroize`/`ZeroizeOnDrop` ensures the contained
// output data (which includes key material) is wiped from memory when this collection is dropped.
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct Timelocked(Vec<WalletOutput>);
|
2022-08-22 12:15:14 -04:00
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
impl Timelocked {
|
|
|
|
|
/// Return the outputs which aren't subject to an additional timelock.
|
2023-07-08 11:29:05 -04:00
|
|
|
#[must_use]
|
2024-07-03 13:35:19 -04:00
|
|
|
pub fn not_additionally_locked(self) -> Vec<WalletOutput> {
|
|
|
|
|
let mut res = vec![];
|
|
|
|
|
for output in &self.0 {
|
|
|
|
|
if output.additional_timelock() == Timelock::None {
|
|
|
|
|
res.push(output.clone());
|
|
|
|
|
}
|
2022-08-22 12:15:14 -04:00
|
|
|
}
|
2024-07-03 13:35:19 -04:00
|
|
|
res
|
2022-08-22 12:15:14 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
/// Return the outputs whose additional timelock unlocks by the specified block/time.
|
|
|
|
|
///
|
|
|
|
|
/// Additional timelocks are almost never used outside of miner transactions, and are
|
|
|
|
|
/// increasingly planned for removal. Ignoring non-miner additionally-timelocked outputs is
|
|
|
|
|
/// recommended.
|
|
|
|
|
///
|
|
|
|
|
/// `block` is the block number of the block the additional timelock must be satsified by.
|
|
|
|
|
///
|
|
|
|
|
/// `time` is represented in seconds since the epoch. Please note Monero uses an on-chain
|
|
|
|
|
/// deterministic clock for time which is subject to variance from the real world time. This time
|
|
|
|
|
/// argument will be evaluated against Monero's clock, not the local system's clock.
|
2023-07-08 11:29:05 -04:00
|
|
|
#[must_use]
|
2024-07-03 13:35:19 -04:00
|
|
|
pub fn additional_timelock_satisfied_by(self, block: usize, time: u64) -> Vec<WalletOutput> {
|
|
|
|
|
let mut res = vec![];
|
|
|
|
|
for output in &self.0 {
|
|
|
|
|
if (output.additional_timelock() <= Timelock::Block(block)) ||
|
|
|
|
|
(output.additional_timelock() <= Timelock::Time(time))
|
|
|
|
|
{
|
|
|
|
|
res.push(output.clone());
|
|
|
|
|
}
|
2023-07-04 18:07:27 -04:00
|
|
|
}
|
2024-07-03 13:35:19 -04:00
|
|
|
res
|
2022-08-22 12:15:14 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
/// Ignore the timelocks and return all outputs within this container.
|
2023-07-08 11:29:05 -04:00
|
|
|
#[must_use]
|
2024-07-03 13:35:19 -04:00
|
|
|
pub fn ignore_additional_timelock(mut self) -> Vec<WalletOutput> {
|
|
|
|
|
let mut res = vec![];
|
|
|
|
|
core::mem::swap(&mut self.0, &mut res);
|
|
|
|
|
res
|
2022-05-26 03:51:27 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-27 07:36:45 -04:00
|
|
|
// The scanner internals shared by `Scanner` and `GuaranteedScanner`.
#[derive(Clone)]
struct InternalScanner {
  // The view pair whose view key is used for the ECDH when scanning outputs.
  pair: ViewPair,
  // Whether to bind the uniqueness of a TX's inputs into the shared-key derivation
  // (the 'guaranteed' addresses protocol).
  guaranteed: bool,
  // Map from compressed subaddress spend key to its index. `None` denotes the root address.
  subaddresses: HashMap<CompressedEdwardsY, Option<SubaddressIndex>>,
}
|
|
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
// Implemented manually, presumably as `HashMap` doesn't offer a `Zeroize` impl to derive from.
impl Zeroize for InternalScanner {
  fn zeroize(&mut self) {
    self.pair.zeroize();
    self.guaranteed.zeroize();

    // This may not be effective, unfortunately
    // (draining moves each entry out of the map before zeroizing it; the map's internal buffer
    // may still retain copies)
    for (mut key, mut value) in self.subaddresses.drain() {
      key.zeroize();
      value.zeroize();
    }
  }
}
|
2024-07-03 13:35:19 -04:00
|
|
|
impl Drop for InternalScanner {
  fn drop(&mut self) {
    // Wipe the keys/metadata before the memory is released
    self.zeroize();
  }
}
|
2024-07-03 13:35:19 -04:00
|
|
|
// Sound to claim, as the manual `Drop` impl zeroizes all fields
impl ZeroizeOnDrop for InternalScanner {}
|
2024-06-27 07:36:45 -04:00
|
|
|
|
2024-07-03 13:35:19 -04:00
|
|
|
impl InternalScanner {
  // Create a scanner for this ViewPair, registering the root (non-subaddress) spend key
  fn new(pair: ViewPair, guaranteed: bool) -> Self {
    let mut subaddresses = HashMap::new();
    // `None` marks the root address, as opposed to a subaddress
    subaddresses.insert(pair.spend().compress(), None);
    Self { pair, guaranteed, subaddresses }
  }

  // Add a subaddress's spend key to the set of keys scanned for
  fn register_subaddress(&mut self, subaddress: SubaddressIndex) {
    let (spend, _) = self.pair.subaddress_keys(subaddress);
    self.subaddresses.insert(spend.compress(), Some(subaddress));
  }

  // Scan a single transaction for outputs to this scanner's registered keys.
  //
  // `tx_start_index_on_blockchain` is the global index of this transaction's first output,
  // used to compute each found output's index on the blockchain.
  fn scan_transaction(
    &self,
    block_hash: [u8; 32],
    tx_start_index_on_blockchain: u64,
    tx: &Transaction,
  ) -> Result<Timelocked, RpcError> {
    // Only scan RCT TXs since we can only spend RCT outputs
    if tx.version() != 2 {
      return Ok(Timelocked(vec![]));
    }

    // Read the extra field
    // An unparseable extra isn't an error, just a TX which can't contain outputs to us
    let Ok(extra) = Extra::read::<&[u8]>(&mut tx.prefix().extra.as_ref()) else {
      return Ok(Timelocked(vec![]));
    };

    // Without TX keys in the extra, no ECDH can be performed, so nothing is scannable
    let Some((tx_keys, additional)) = extra.keys() else {
      return Ok(Timelocked(vec![]));
    };
    let payment_id = extra.payment_id();

    let mut res = vec![];
    for (o, output) in tx.prefix().outputs.iter().enumerate() {
      // Skip outputs whose keys aren't valid points
      let Some(output_key) = decompress_point(output.key.to_bytes()) else { continue };

      // Monero checks with each TX key and with the additional key for this output

      // This will be None if there's no additional keys, Some(None) if there's additional keys
      // yet not one for this output (which is non-standard), and Some(Some(_)) if there's an
      // additional key for this output
      // https://github.com/monero-project/monero/
      // blob/04a1e2875d6e35e27bb21497988a6c822d319c28/
      // src/cryptonote_basic/cryptonote_format_utils.cpp#L1062
      let additional = additional.as_ref().map(|additional| additional.get(o));

      #[allow(clippy::manual_let_else)]
      for key in tx_keys.iter().map(|key| Some(Some(key))).chain(core::iter::once(additional)) {
        // Get the key, or continue if there isn't one
        let key = match key {
          Some(Some(key)) => key,
          Some(None) | None => continue,
        };
        // Calculate the ECDH
        let ecdh = Zeroizing::new(self.pair.view.deref() * key);
        let output_derivations = SharedKeyDerivations::output_derivations(
          // For guaranteed scanning, the uniqueness of this TX's inputs is bound into the
          // derivation
          if self.guaranteed {
            Some(SharedKeyDerivations::uniqueness(&tx.prefix().inputs))
          } else {
            None
          },
          ecdh.clone(),
          o,
        );

        // Check the view tag matches, if there is a view tag
        if let Some(actual_view_tag) = output.view_tag {
          if actual_view_tag != output_derivations.view_tag {
            continue;
          }
        }

        // P - shared == spend
        let Some(subaddress) = ({
          // The output key may be of torsion [0, 8)
          // Our subtracting of a prime-order element means any torsion will be preserved
          // If someone wanted to malleate output keys with distinct torsions, only one will be
          // scanned accordingly (the one which has matching torsion of the spend key)
          // TODO: If there's a torsioned spend key, can we spend outputs to it?
          let subaddress_spend_key =
            output_key - (&output_derivations.shared_key * ED25519_BASEPOINT_TABLE);
          self.subaddresses.get(&subaddress_spend_key.compress())
        }) else {
          continue;
        };
        let subaddress = *subaddress;

        // The key offset is this shared key
        let mut key_offset = output_derivations.shared_key;
        if let Some(subaddress) = subaddress {
          // And if this was to a subaddress, it's additionally the offset from subaddress spend
          // key to the normal spend key
          key_offset += self.pair.subaddress_derivation(subaddress);
        }
        // Since we've found an output to us, get its amount
        let mut commitment = Commitment::zero();

        // Miner transaction
        if let Some(amount) = output.amount {
          commitment.amount = amount;
        // Regular transaction
        } else {
          let Transaction::V2 { proofs: Some(ref proofs), .. } = &tx else {
            // Invalid transaction, as of consensus rules at the time of writing this code
            Err(RpcError::InvalidNode("non-miner v2 transaction without RCT proofs".to_string()))?
          };

          commitment = match proofs.base.encrypted_amounts.get(o) {
            Some(amount) => output_derivations.decrypt(amount),
            // Invalid transaction, as of consensus rules at the time of writing this code
            None => Err(RpcError::InvalidNode(
              "RCT proofs without an encrypted amount per output".to_string(),
            ))?,
          };

          // Rebuild the commitment to verify it
          if Some(&commitment.calculate()) != proofs.base.commitments.get(o) {
            continue;
          }
        }

        // Decrypt the payment ID
        let payment_id = payment_id.map(|id| id ^ SharedKeyDerivations::payment_id_xor(ecdh));

        res.push(WalletOutput {
          absolute_id: AbsoluteId {
            transaction: tx.hash(),
            index_in_transaction: o.try_into().unwrap(),
          },
          relative_id: RelativeId {
            block: block_hash,
            index_on_blockchain: tx_start_index_on_blockchain + u64::try_from(o).unwrap(),
          },
          data: OutputData {
            key: output_key,
            key_offset,
            commitment,
            additional_timelock: tx.prefix().timelock,
          },
          metadata: Metadata { subaddress, payment_id, arbitrary_data: extra.data() },
        });

        // Break to prevent public keys from being included multiple times, triggering multiple
        // inclusions of the same output
        break;
      }
    }

    Ok(Timelocked(res))
  }

  // Scan every transaction within a block for outputs to this scanner's registered keys
  async fn scan(&mut self, rpc: &impl Rpc, block: &Block) -> Result<Timelocked, RpcError> {
    // Refuse to scan blocks from hardforks whose scanning rules we don't know
    if block.header.hardfork_version > 16 {
      Err(RpcError::InternalError(format!(
        "scanning a hardfork {} block, when we only support up to 16",
        block.header.hardfork_version
      )))?;
    }

    let block_hash = block.hash();

    // We get the output indexes for the miner transaction as a reference point
    // TODO: Are miner transactions since v2 guaranteed to have an output?
    let mut tx_start_index_on_blockchain = *rpc
      .get_o_indexes(block.miner_transaction.hash())
      .await?
      .first()
      .ok_or(RpcError::InvalidNode("miner transaction without outputs".to_string()))?;

    // We obtain all TXs in full
    let mut txs = vec![block.miner_transaction.clone()];
    txs.extend(rpc.get_transactions(&block.transactions).await?);

    let mut res = Timelocked(vec![]);
    for tx in txs {
      // Push all outputs into our result
      {
        let mut this_txs_outputs = vec![];
        // Swapped, not moved, as `Timelocked` implements Drop and fields can't be moved out of it
        core::mem::swap(
          &mut self.scan_transaction(block_hash, tx_start_index_on_blockchain, &tx)?.0,
          &mut this_txs_outputs,
        );
        res.0.extend(this_txs_outputs);
      }

      // Update the TX start index for the next TX
      tx_start_index_on_blockchain += u64::try_from(
        tx.prefix()
          .outputs
          .iter()
          // Filter to v2 miner TX outputs/RCT outputs since we're tracking the RCT output index
          .filter(|output| {
            let is_v2_miner_tx =
              (tx.version() == 2) && matches!(tx.prefix().inputs.first(), Some(Input::Gen(..)));
            is_v2_miner_tx || output.amount.is_none()
          })
          .count(),
      )
      .unwrap()
    }

    // If the block's version is >= 12, drop all unencrypted payment IDs
    // TODO: Cite rule
    // TODO: What if TX extra had multiple payment IDs embedded?
    if block.header.hardfork_version >= 12 {
      for output in &mut res.0 {
        if matches!(output.metadata.payment_id, Some(PaymentId::Unencrypted(_))) {
          output.metadata.payment_id = None;
        }
      }
    }

    Ok(res)
  }
}
|
2024-07-03 13:35:19 -04:00
|
|
|
|
|
|
|
|
/// A transaction scanner to find outputs received.
///
/// When an output is successfully scanned, the output key MUST be checked against the local
/// database for lack of prior observation. If it was prior observed, that output is an instance
/// of the burning bug (TODO: cite) and MAY be unspendable. Only the prior received output(s) or
/// the newly received output will be spendable (as spending one will burn all of them).
///
/// Once checked, the output key MUST be saved to the local database so future checks can be
/// performed.
// Newtype over the shared scanner internals, constructed with `guaranteed` unset
#[derive(Clone, Zeroize, ZeroizeOnDrop)]
pub struct Scanner(InternalScanner);
|
|
|
|
|
|
|
|
|
|
impl Scanner {
|
|
|
|
|
/// Create a Scanner from a ViewPair.
|
|
|
|
|
pub fn new(pair: ViewPair) -> Self {
|
|
|
|
|
Self(InternalScanner::new(pair, false))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Register a subaddress to scan for.
|
|
|
|
|
///
|
|
|
|
|
/// Subaddresses must be explicitly registered ahead of time in order to be successfully scanned.
|
|
|
|
|
pub fn register_subaddress(&mut self, subaddress: SubaddressIndex) {
|
|
|
|
|
self.0.register_subaddress(subaddress)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
/// Scan a transaction.
|
|
|
|
|
///
|
|
|
|
|
/// This takes in the block hash the transaction is contained in. This method is NOT recommended
|
|
|
|
|
/// and MUST be used carefully. The node will receive a request for the output indexes of the
|
|
|
|
|
/// specified transactions, which may de-anonymize which transactions belong to a user.
|
|
|
|
|
pub async fn scan_transaction(
|
|
|
|
|
&self,
|
|
|
|
|
rpc: &impl Rpc,
|
|
|
|
|
block_hash: [u8; 32],
|
|
|
|
|
tx: &Transaction,
|
|
|
|
|
) -> Result<Timelocked, RpcError> {
|
|
|
|
|
// This isn't technically illegal due to a lack of minimum output rules for a while
|
|
|
|
|
let Some(tx_start_index_on_blockchain) =
|
|
|
|
|
rpc.get_o_indexes(tx.hash()).await?.first().copied() else {
|
|
|
|
|
return Ok(Timelocked(vec![]))
|
|
|
|
|
};
|
|
|
|
|
self.0.scan_transaction(block_hash, tx_start_index_on_blockchain, tx)
|
|
|
|
|
}
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/// Scan a block.
|
|
|
|
|
pub async fn scan(&mut self, rpc: &impl Rpc, block: &Block) -> Result<Timelocked, RpcError> {
|
|
|
|
|
self.0.scan(rpc, block).await
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// A transaction scanner to find outputs received which are guaranteed to be spendable.
///
/// 'Guaranteed' outputs, or transactions outputs to the burning bug, are not officially specified
/// by the Monero project. They should only be used if necessary. No support outside of
/// monero-wallet is promised.
///
/// "guaranteed to be spendable" assumes satisfaction of any timelocks in effect.
// Newtype over the shared scanner internals, constructed with `guaranteed` set
#[derive(Clone, Zeroize, ZeroizeOnDrop)]
pub struct GuaranteedScanner(InternalScanner);
|
|
|
|
|
|
|
|
|
|
impl GuaranteedScanner {
|
|
|
|
|
/// Create a GuaranteedScanner from a GuaranteedViewPair.
|
|
|
|
|
pub fn new(pair: GuaranteedViewPair) -> Self {
|
|
|
|
|
Self(InternalScanner::new(pair.0, true))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Register a subaddress to scan for.
|
|
|
|
|
///
|
|
|
|
|
/// Subaddresses must be explicitly registered ahead of time in order to be successfully scanned.
|
|
|
|
|
pub fn register_subaddress(&mut self, subaddress: SubaddressIndex) {
|
|
|
|
|
self.0.register_subaddress(subaddress)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
/// Scan a transaction.
|
|
|
|
|
///
|
|
|
|
|
/// This takes in the block hash the transaction is contained in. This method is NOT recommended
|
|
|
|
|
/// and MUST be used carefully. The node will receive a request for the output indexes of the
|
|
|
|
|
/// specified transactions, which may de-anonymize which transactions belong to a user.
|
|
|
|
|
pub async fn scan_transaction(
|
|
|
|
|
&self,
|
|
|
|
|
rpc: &impl Rpc,
|
|
|
|
|
block_hash: [u8; 32],
|
|
|
|
|
tx: &Transaction,
|
|
|
|
|
) -> Result<Timelocked, RpcError> {
|
|
|
|
|
// This isn't technically illegal due to a lack of minimum output rules for a while
|
|
|
|
|
let Some(tx_start_index_on_blockchain) =
|
|
|
|
|
rpc.get_o_indexes(tx.hash()).await?.first().copied() else {
|
|
|
|
|
return Ok(Timelocked(vec![]))
|
|
|
|
|
};
|
|
|
|
|
self.0.scan_transaction(block_hash, tx_start_index_on_blockchain, tx)
|
|
|
|
|
}
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/// Scan a block.
|
|
|
|
|
pub async fn scan(&mut self, rpc: &impl Rpc, block: &Block) -> Result<Timelocked, RpcError> {
|
|
|
|
|
self.0.scan(rpc, block).await
|
|
|
|
|
}
|
|
|
|
|
}
|