Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-08 12:19:24 +00:00)
Replace "coin" with "network"
The Processor's coins folder referred to the networks it could process, as did its Coin trait. This, and other similar cases throughout the codebase, have now been corrected. Also corrects dated documentation for how a key pair is confirmed under the validator-sets pallet.
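To make the scope of the rename concrete, below is a minimal, self-contained Rust sketch of how a bound on the renamed trait reads to downstream code. The `Network` trait here is a hypothetical stand-in inferred from usages visible in the diff (`N::ID`, `N::CONFIRMATIONS`); the real trait lives in the processor's `networks` module and carries many more items (Curve, Block, Fee, Address, ...).

```rust
// Hypothetical, reduced stand-in for the renamed trait; not the real definition.
trait Network {
    const ID: &'static str;
    const CONFIRMATIONS: usize;
}

struct Bitcoin;
impl Network for Bitcoin {
    const ID: &'static str = "Bitcoin";
    const CONFIRMATIONS: usize = 6; // illustrative value only
}

// Call sites change only in the bound and the conventional parameter name:
//   before: fn describe<C: Coin>() -> String
//   after:  fn describe<N: Network>() -> String
fn describe<N: Network>() -> String {
    format!("{} (waits for {} confirmations)", N::ID, N::CONFIRMATIONS)
}

fn main() {
    println!("{}", describe::<Bitcoin>());
}
```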
@@ -398,7 +398,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
 key_gen::ProcessorMessage::Shares { id, shares } => {
 Some(Transaction::DkgShares(id.attempt, shares, Transaction::empty_signed()))
 }
-key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } => {
+key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
 assert_eq!(
 id.set.network, msg.network,
 "processor claimed to be a different network than it was for GeneratedKeyPair",
@@ -411,7 +411,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
 id.set.network,
 (
 Public(substrate_key),
-coin_key
+network_key
 .try_into()
 .expect("external key from processor exceeded max external key length"),
 ),
@@ -102,7 +102,7 @@ async fn handle_key_gen<D: Db, Pro: Processors>(
 processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
 context: SubstrateContext {
 serai_time: block.time().unwrap(),
-coin_latest_finalized_block: serai
+network_latest_finalized_block: serai
 .get_latest_block_for_network(block.hash(), set.network)
 .await?
 // The processor treats this as a magic value which will cause it to find a network
@@ -176,7 +176,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
 assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());

 for network in networks_with_event {
-let coin_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
+let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
 block
 } else {
 // If it's had a batch or a burn, it must have had a block acknowledged
@@ -194,7 +194,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
 processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
 context: SubstrateContext {
 serai_time: block.time().unwrap(),
-coin_latest_finalized_block,
+network_latest_finalized_block,
 },
 network,
 block: block.number(),
@@ -8,7 +8,7 @@ encoded into transactions on connected networks. Serai will parse included
 instructions when it receives coins, executing the included specs.

 - Out Instructions detail how to transfer coins, either to a Serai address or
-an address native to the coin in question.
+an address native to the network of the coins in question.

 A transaction containing an In Instruction and an Out Instruction (to a native
 address) will receive coins to Serai and send coins from Serai, without
@@ -15,6 +15,7 @@ protocol.
 | Session | u32 |
 | Validator Set | (Session, NetworkId) |
 | Key | BoundedVec\<u8, 96> |
+| KeyPair | (SeraiAddress, Key) |
 | ExternalAddress | BoundedVec\<u8, 128> |
 | Data | BoundedVec\<u8, 512> |

@@ -30,19 +30,16 @@ reject newly added coins which would cross that threshold.
 Multisigs are created by processors, communicating via their Coordinators.
 They're then confirmed on chain via the `validator-sets` pallet. This is done by
 having 100% of participants agree on the resulting group key. While this isn't
-fault tolerant, a malicious actor who forces a `t`-of-`n` multisig to be
-`t`-of-`n-1` reduces the fault tolerance of the multisig which is a greater
-issue. If a node does prevent multisig creation, other validators should issue
-slashes for it/remove it from the Validator Set entirely.
+fault tolerant regarding liveliness, a malicious actor who forces a `t`-of-`n`
+multisig to be `t`-of-`n-1` reduces the fault tolerance of the created multisig
+which is a greater issue. If a node does prevent multisig creation, other
+validators should issue slashes for it/remove it from the Validator Set
+entirely.

-Due to the fact multiple key generations may occur to account for
-faulty/malicious nodes, voting on multiple keys for a single coin is allowed,
-with the first key to be confirmed becoming the key for that coin.
-
-Placing it on chain also solves the question of if the multisig was successfully
-created or not. Processors cannot simply ask each other if they succeeded
-without creating an instance of the Byzantine Generals Problem. Placing results
-within a Byzantine Fault Tolerant system resolves this.
+Placing the creation on chain also solves the question of if the multisig was
+successfully created or not. Processors cannot simply ask each other if they
+succeeded without creating an instance of the Byzantine Generals Problem.
+Placing results within a Byzantine Fault Tolerant system resolves this.

 ### Multisig Lifetime

@@ -61,9 +58,11 @@ no longer eligible to receive coins and they forward all of their coins to the
 new set of keys. It is only then that validators in the previous instance of the
 set, yet not the current instance, may unbond their stake.

-### Vote (message)
+### Set Keys (message)

-- `coin` (Coin): Coin whose key is being voted for.
-- `key` (Key): Key being voted on.
+- `network` (Network): Network whose key is being voted for.
+- `key_pair` (KeyPair): Key pair being set for this `Session`.
+- `signature` (Signature): A MuSig-style signature of all validators,
+confirming this key.

 Once a key is voted on by every member, it's adopted as detailed above.
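As a rough illustration of the shapes named in the documentation hunk above, here is a plain-Rust sketch of the table's primitives and the renamed Set Keys message. The concrete types are assumptions chosen for readability (the real ones are the validator-sets primitives and Substrate `BoundedVec`s), and `NetworkId` is reduced to a plain integer.

```rust
// Illustrative stand-ins only; field names follow the documentation above,
// the types do not match the real Substrate definitions.
pub struct SeraiAddress(pub [u8; 32]);
pub struct Key(pub Vec<u8>); // bounded to 96 bytes in the real type
pub struct KeyPair(pub SeraiAddress, pub Key);

pub struct SetKeys {
    pub network: u16,       // stand-in for NetworkId
    pub key_pair: KeyPair,  // the key pair being set for this Session
    pub signature: Vec<u8>, // MuSig-style signature of all validators
}

fn main() {
    let _msg = SetKeys {
        network: 0,
        key_pair: KeyPair(SeraiAddress([0; 32]), Key(vec![0; 32])),
        signature: vec![],
    };
}
```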
@@ -14,7 +14,7 @@ use validator_sets_primitives::{ValidatorSet, KeyPair};
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
 pub struct SubstrateContext {
 pub serai_time: u64,
-pub coin_latest_finalized_block: BlockHash,
+pub network_latest_finalized_block: BlockHash,
 }

 pub mod key_gen {
@@ -50,7 +50,7 @@ pub mod key_gen {
 // Created shares for the specified key generation protocol.
 Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> },
 // Resulting keys from the specified key generation protocol.
-GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], coin_key: Vec<u8> },
+GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec<u8> },
 }
 }

@@ -165,7 +165,7 @@ pub mod substrate {
 CoordinatorMessage::ConfirmKeyPair { context, .. } => context,
 CoordinatorMessage::SubstrateBlock { context, .. } => context,
 };
-Some(context.coin_latest_finalized_block)
+Some(context.network_latest_finalized_block)
 }
 }

@@ -263,7 +263,7 @@ impl CoordinatorMessage {
 }
 CoordinatorMessage::Sign(msg) => {
 let (sub, id) = match msg {
-// Unique since SignId includes a hash of the coin, and specific transaction info
+// Unique since SignId includes a hash of the network, and specific transaction info
 sign::CoordinatorMessage::Preprocesses { id, .. } => (0, bincode::serialize(id).unwrap()),
 sign::CoordinatorMessage::Shares { id, .. } => (1, bincode::serialize(id).unwrap()),
 sign::CoordinatorMessage::Reattempt { id } => (2, bincode::serialize(id).unwrap()),
@@ -1,14 +1,14 @@
 use ciphersuite::Ciphersuite;

-use crate::coins::Coin;
+use crate::networks::Network;

 // Generate a static additional key for a given chain in a globally consistent manner
 // Doesn't consider the current group key to increase the simplicity of verifying Serai's status
 // Takes an index, k, to support protocols which use multiple secondary keys
 // Presumably a view key
-pub fn additional_key<C: Coin>(k: u64) -> <C::Curve as Ciphersuite>::F {
-<C::Curve as Ciphersuite>::hash_to_F(
+pub fn additional_key<N: Network>(k: u64) -> <N::Curve as Ciphersuite>::F {
+<N::Curve as Ciphersuite>::hash_to_F(
 b"Serai DEX Additional Key",
-&[C::ID.as_bytes(), &k.to_le_bytes()].concat(),
+&[N::ID.as_bytes(), &k.to_le_bytes()].concat(),
 )
 }
@@ -2,11 +2,11 @@ use core::marker::PhantomData;

 pub use serai_db::*;

-use crate::{Plan, coins::Coin};
+use crate::{Plan, networks::Network};

 #[derive(Debug)]
-pub struct MainDb<C: Coin, D: Db>(D, PhantomData<C>);
-impl<C: Coin, D: Db> MainDb<C, D> {
+pub struct MainDb<N: Network, D: Db>(D, PhantomData<N>);
+impl<N: Network, D: Db> MainDb<N, D> {
 pub fn new(db: D) -> Self {
 Self(db, PhantomData)
 }
@@ -31,7 +31,7 @@ impl<C: Coin, D: Db> MainDb<C, D> {
 fn signing_key(key: &[u8]) -> Vec<u8> {
 Self::main_key(b"signing", key)
 }
-pub fn save_signing(txn: &mut D::Transaction<'_>, key: &[u8], block_number: u64, plan: &Plan<C>) {
+pub fn save_signing(txn: &mut D::Transaction<'_>, key: &[u8], block_number: u64, plan: &Plan<N>) {
 let id = plan.id();

 {
@@ -56,7 +56,7 @@ impl<C: Coin, D: Db> MainDb<C, D> {
 }
 }

-pub fn signing(&self, key: &[u8]) -> Vec<(u64, Plan<C>)> {
+pub fn signing(&self, key: &[u8]) -> Vec<(u64, Plan<N>)> {
 let signing = self.0.get(Self::signing_key(key)).unwrap_or(vec![]);
 let mut res = vec![];

@@ -66,7 +66,7 @@ impl<C: Coin, D: Db> MainDb<C, D> {
 let buf = self.0.get(Self::plan_key(id)).unwrap();

 let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap());
-let plan = Plan::<C>::read::<&[u8]>(&mut &buf[16 ..]).unwrap();
+let plan = Plan::<N>::read::<&[u8]>(&mut &buf[16 ..]).unwrap();
 assert_eq!(id, &plan.id());
 res.push((block_number, plan));
 }
@@ -18,17 +18,17 @@ use log::info;
 use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
 use messages::key_gen::*;

-use crate::{Get, DbTxn, Db, coins::Coin};
+use crate::{Get, DbTxn, Db, networks::Network};

 #[derive(Debug)]
 pub struct KeyConfirmed<C: Ciphersuite> {
 pub substrate_keys: ThresholdKeys<Ristretto>,
-pub coin_keys: ThresholdKeys<C>,
+pub network_keys: ThresholdKeys<C>,
 }

 #[derive(Clone, Debug)]
-struct KeyGenDb<C: Coin, D: Db>(PhantomData<D>, PhantomData<C>);
-impl<C: Coin, D: Db> KeyGenDb<C, D> {
+struct KeyGenDb<N: Network, D: Db>(PhantomData<D>, PhantomData<N>);
+impl<N: Network, D: Db> KeyGenDb<N, D> {
 fn key_gen_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
 D::key(b"KEY_GEN", dst, key)
 }
@@ -71,39 +71,42 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
 txn: &mut D::Transaction<'_>,
 id: &KeyGenId,
 substrate_keys: &ThresholdCore<Ristretto>,
-coin_keys: &ThresholdKeys<C::Curve>,
+network_keys: &ThresholdKeys<N::Curve>,
 ) {
 let mut keys = substrate_keys.serialize();
-keys.extend(coin_keys.serialize().iter());
+keys.extend(network_keys.serialize().iter());
 txn.put(
 Self::generated_keys_key(
 id.set,
-(substrate_keys.group_key().to_bytes().as_ref(), coin_keys.group_key().to_bytes().as_ref()),
+(
+substrate_keys.group_key().to_bytes().as_ref(),
+network_keys.group_key().to_bytes().as_ref(),
+),
 ),
 keys,
 );
 }

-fn keys_key(key: &<C::Curve as Ciphersuite>::G) -> Vec<u8> {
+fn keys_key(key: &<N::Curve as Ciphersuite>::G) -> Vec<u8> {
 Self::key_gen_key(b"keys", key.to_bytes())
 }
 #[allow(clippy::type_complexity)]
 fn read_keys<G: Get>(
 getter: &G,
 key: &[u8],
-) -> (Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>)) {
+) -> (Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)) {
 let keys_vec = getter.get(key).unwrap();
 let mut keys_ref: &[u8] = keys_vec.as_ref();
 let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
-let mut coin_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
-C::tweak_keys(&mut coin_keys);
-(keys_vec, (substrate_keys, coin_keys))
+let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
+N::tweak_keys(&mut network_keys);
+(keys_vec, (substrate_keys, network_keys))
 }
 fn confirm_keys(
 txn: &mut D::Transaction<'_>,
 set: ValidatorSet,
 key_pair: KeyPair,
-) -> (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>) {
+) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
 let (keys_vec, keys) = Self::read_keys(
 txn,
 &Self::generated_keys_key(set, (key_pair.0.as_ref(), key_pair.1.as_ref())),
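The `save_keys`/`read_keys` pair in the hunk above works because each serialized `ThresholdCore` is self-describing: the substrate keys and the (now renamed) network keys are concatenated into one buffer, and the second `read` simply continues where the first stopped. Below is a dependency-free toy sketch of that pattern, using an assumed length-prefix format in place of the real serialization.

```rust
use std::io::{Cursor, Read};

// Toy stand-in for a self-describing serialization: a length-prefixed blob,
// so two serializations can be concatenated and read back in order.
fn write_blob(out: &mut Vec<u8>, data: &[u8]) {
    out.extend_from_slice(&(data.len() as u32).to_le_bytes());
    out.extend_from_slice(data);
}

fn read_blob(reader: &mut impl Read) -> std::io::Result<Vec<u8>> {
    let mut len = [0u8; 4];
    reader.read_exact(&mut len)?;
    let mut buf = vec![0u8; u32::from_le_bytes(len) as usize];
    reader.read_exact(&mut buf)?;
    Ok(buf)
}

fn main() -> std::io::Result<()> {
    // "substrate keys" followed by "network keys", in one buffer.
    let mut keys = Vec::new();
    write_blob(&mut keys, b"substrate");
    write_blob(&mut keys, b"network");

    let mut cursor = Cursor::new(keys);
    assert_eq!(read_blob(&mut cursor)?, b"substrate".to_vec());
    assert_eq!(read_blob(&mut cursor)?, b"network".to_vec());
    Ok(())
}
```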
@@ -111,8 +114,8 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
 assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes());
 assert_eq!(
 {
-let coin_key: &[u8] = key_pair.1.as_ref();
-coin_key
+let network_key: &[u8] = key_pair.1.as_ref();
+network_key
 },
 keys.1.group_key().to_bytes().as_ref(),
 );
@@ -121,8 +124,8 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
 }
 fn keys<G: Get>(
 getter: &G,
-key: &<C::Curve as Ciphersuite>::G,
-) -> (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>) {
+key: &<N::Curve as Ciphersuite>::G,
+) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
 let res = Self::read_keys(getter, &Self::keys_key(key)).1;
 assert_eq!(&res.1.group_key(), key);
 res
@@ -133,32 +136,32 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
 /// 1) It either didn't send its response, so the attempt will be aborted
 /// 2) It did send its response, and has locally saved enough data to continue
 #[derive(Debug)]
-pub struct KeyGen<C: Coin, D: Db> {
+pub struct KeyGen<N: Network, D: Db> {
 db: D,
 entropy: Zeroizing<[u8; 32]>,

 active_commit:
-HashMap<ValidatorSet, (SecretShareMachine<Ristretto>, SecretShareMachine<C::Curve>)>,
-active_share: HashMap<ValidatorSet, (KeyMachine<Ristretto>, KeyMachine<C::Curve>)>,
+HashMap<ValidatorSet, (SecretShareMachine<Ristretto>, SecretShareMachine<N::Curve>)>,
+active_share: HashMap<ValidatorSet, (KeyMachine<Ristretto>, KeyMachine<N::Curve>)>,
 }

-impl<C: Coin, D: Db> KeyGen<C, D> {
+impl<N: Network, D: Db> KeyGen<N, D> {
 #[allow(clippy::new_ret_no_self)]
-pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<C, D> {
+pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<N, D> {
 KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }
 }

 pub fn keys(
 &self,
-key: &<C::Curve as Ciphersuite>::G,
-) -> (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>) {
+key: &<N::Curve as Ciphersuite>::G,
+) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
 // This is safe, despite not having a txn, since it's a static value
 // The only concern is it may not be set when expected, or it may be set unexpectedly
 // Since this unwraps, it being unset when expected to be set will cause a panic
 // The only other concern is if it's set when it's not safe to use
 // The keys are only written on confirmation, and the transaction writing them is atomic to
 // every associated operation
-KeyGenDb::<C, D>::keys(&self.db, key)
+KeyGenDb::<N, D>::keys(&self.db, key)
 }

 pub async fn handle(
@@ -187,8 +190,8 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 let key_gen_machines = |id, params| {
 let mut rng = coefficients_rng(id);
 let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
-let coin = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
-((substrate.0, coin.0), (substrate.1, coin.1))
+let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
+((substrate.0, network.0), (substrate.1, network.1))
 };

 match msg {
@@ -200,7 +203,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 self.active_share.remove(&id.set).is_none()
 {
 // If we haven't handled this set before, save the params
-KeyGenDb::<C, D>::save_params(txn, &id.set, &params);
+KeyGenDb::<N, D>::save_params(txn, &id.set, &params);
 }

 let (machines, commitments) = key_gen_machines(id, params);
@@ -221,7 +224,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 panic!("commitments when already handled commitments");
 }

-let params = KeyGenDb::<C, D>::params(txn, &id.set);
+let params = KeyGenDb::<N, D>::params(txn, &id.set);

 // Unwrap the machines, rebuilding them if we didn't have them in our cache
 // We won't if the processor rebooted
@@ -264,7 +267,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {

 let (substrate_machine, mut substrate_shares) =
 handle_machine::<Ristretto>(&mut rng, params, machines.0, &mut commitments_ref);
-let (coin_machine, coin_shares) =
+let (network_machine, network_shares) =
 handle_machine(&mut rng, params, machines.1, &mut commitments_ref);

 for (_, commitments) in commitments_ref {
@@ -273,15 +276,15 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 }
 }

-self.active_share.insert(id.set, (substrate_machine, coin_machine));
+self.active_share.insert(id.set, (substrate_machine, network_machine));

 let mut shares: HashMap<_, _> =
 substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
 for (i, share) in shares.iter_mut() {
-share.extend(coin_shares[i].serialize());
+share.extend(network_shares[i].serialize());
 }

-KeyGenDb::<C, D>::save_commitments(txn, &id, &commitments);
+KeyGenDb::<N, D>::save_commitments(txn, &id, &commitments);

 ProcessorMessage::Shares { id, shares }
 }
@@ -289,13 +292,13 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 CoordinatorMessage::Shares { id, shares } => {
 info!("Received shares for {:?}", id);

-let params = KeyGenDb::<C, D>::params(txn, &id.set);
+let params = KeyGenDb::<N, D>::params(txn, &id.set);

 // Same commentary on inconsistency as above exists
 let machines = self.active_share.remove(&id.set).unwrap_or_else(|| {
 let machines = key_gen_machines(id, params).0;
 let mut rng = secret_shares_rng(id);
-let commitments = KeyGenDb::<C, D>::commitments(txn, &id);
+let commitments = KeyGenDb::<N, D>::commitments(txn, &id);

 let mut commitments_ref: HashMap<Participant, &[u8]> =
 commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
@@ -358,7 +361,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 }

 let substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
-let coin_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
+let network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);

 for (_, shares) in shares_ref {
 if !shares.is_empty() {
@@ -366,15 +369,15 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 }
 }

-let mut coin_keys = ThresholdKeys::new(coin_keys);
-C::tweak_keys(&mut coin_keys);
+let mut network_keys = ThresholdKeys::new(network_keys);
+N::tweak_keys(&mut network_keys);

-KeyGenDb::<C, D>::save_keys(txn, &id, &substrate_keys, &coin_keys);
+KeyGenDb::<N, D>::save_keys(txn, &id, &substrate_keys, &network_keys);

 ProcessorMessage::GeneratedKeyPair {
 id,
 substrate_key: substrate_keys.group_key().to_bytes(),
-coin_key: coin_keys.group_key().to_bytes().as_ref().to_vec(),
+network_key: network_keys.group_key().to_bytes().as_ref().to_vec(),
 }
 }
 }
@@ -385,16 +388,16 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
 txn: &mut D::Transaction<'_>,
 set: ValidatorSet,
 key_pair: KeyPair,
-) -> KeyConfirmed<C::Curve> {
-let (substrate_keys, coin_keys) = KeyGenDb::<C, D>::confirm_keys(txn, set, key_pair);
+) -> KeyConfirmed<N::Curve> {
+let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair);

 info!(
 "Confirmed key pair {} {} for set {:?}",
 hex::encode(substrate_keys.group_key().to_bytes()),
-hex::encode(coin_keys.group_key().to_bytes()),
+hex::encode(network_keys.group_key().to_bytes()),
 set,
 );

-KeyConfirmed { substrate_keys, coin_keys }
+KeyConfirmed { substrate_keys, network_keys }
 }
 }
@@ -1,7 +1,7 @@
 mod plan;
 pub use plan::*;

-pub mod coins;
+pub mod networks;

 mod additional_key;
 pub use additional_key::additional_key;
@@ -31,12 +31,12 @@ use message_queue::{Service, client::MessageQueue};
 mod plan;
 pub use plan::*;

-mod coins;
-use coins::{OutputType, Output, PostFeeBranch, Block, Coin};
+mod networks;
+use networks::{OutputType, Output, PostFeeBranch, Block, Network};
 #[cfg(feature = "bitcoin")]
-use coins::Bitcoin;
+use networks::Bitcoin;
 #[cfg(feature = "monero")]
-use coins::Monero;
+use networks::Monero;

 mod additional_key;
 pub use additional_key::additional_key;
@@ -65,9 +65,9 @@ use scheduler::Scheduler;
 #[cfg(test)]
 mod tests;

-async fn get_latest_block_number<C: Coin>(coin: &C) -> usize {
+async fn get_latest_block_number<N: Network>(network: &N) -> usize {
 loop {
-match coin.get_latest_block_number().await {
+match network.get_latest_block_number().await {
 Ok(number) => {
 return number;
 }
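The helpers being renamed in this region (`get_latest_block_number` above, `get_block` and `prepare_send` below) share a retry-until-success shape: loop over the node call and return as soon as it yields `Ok`. The diff elides the error arm, so the log-and-sleep below is an assumption; this is a synchronous, dependency-free sketch of the pattern rather than the processor's actual code.

```rust
use std::{thread::sleep, time::Duration};

// Keep retrying a fallible call until it succeeds, logging each failure.
fn call_until_ok<T, E: std::fmt::Debug>(mut call: impl FnMut() -> Result<T, E>) -> T {
    loop {
        match call() {
            Ok(value) => return value,
            Err(e) => {
                eprintln!("node call failed, retrying: {e:?}");
                // The real code awaits an async sleep of several seconds.
                sleep(Duration::from_millis(500));
            }
        }
    }
}

fn main() {
    let mut attempts = 0;
    let block_number = call_until_ok(|| {
        attempts += 1;
        if attempts < 3 { Err("not synced yet") } else { Ok(42usize) }
    });
    assert_eq!(block_number, 42);
}
```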
@@ -82,9 +82,9 @@ async fn get_latest_block_number<C: Coin>(coin: &C) -> usize {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_block<C: Coin>(coin: &C, block_number: usize) -> C::Block {
|
async fn get_block<N: Network>(network: &N, block_number: usize) -> N::Block {
|
||||||
loop {
|
loop {
|
||||||
match coin.get_block(block_number).await {
|
match network.get_block(block_number).await {
|
||||||
Ok(block) => {
|
Ok(block) => {
|
||||||
return block;
|
return block;
|
||||||
}
|
}
|
||||||
@@ -96,20 +96,20 @@ async fn get_block<C: Coin>(coin: &C, block_number: usize) -> C::Block {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_fee<C: Coin>(coin: &C, block_number: usize) -> C::Fee {
|
async fn get_fee<N: Network>(network: &N, block_number: usize) -> N::Fee {
|
||||||
// TODO2: Use an fee representative of several blocks
|
// TODO2: Use an fee representative of several blocks
|
||||||
get_block(coin, block_number).await.median_fee()
|
get_block(network, block_number).await.median_fee()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn prepare_send<C: Coin>(
|
async fn prepare_send<N: Network>(
|
||||||
coin: &C,
|
network: &N,
|
||||||
keys: ThresholdKeys<C::Curve>,
|
keys: ThresholdKeys<N::Curve>,
|
||||||
block_number: usize,
|
block_number: usize,
|
||||||
fee: C::Fee,
|
fee: N::Fee,
|
||||||
plan: Plan<C>,
|
plan: Plan<N>,
|
||||||
) -> (Option<(C::SignableTransaction, C::Eventuality)>, Vec<PostFeeBranch>) {
|
) -> (Option<(N::SignableTransaction, N::Eventuality)>, Vec<PostFeeBranch>) {
|
||||||
loop {
|
loop {
|
||||||
match coin.prepare_send(keys.clone(), block_number, plan.clone(), fee).await {
|
match network.prepare_send(keys.clone(), block_number, plan.clone(), fee).await {
|
||||||
Ok(prepared) => {
|
Ok(prepared) => {
|
||||||
return prepared;
|
return prepared;
|
||||||
}
|
}
|
||||||
@@ -129,7 +129,7 @@ async fn prepare_send<C: Coin>(
|
|||||||
// Items which are mutably borrowed by Tributary.
|
// Items which are mutably borrowed by Tributary.
|
||||||
// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't
|
// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't
|
||||||
// violated.
|
// violated.
|
||||||
struct TributaryMutable<C: Coin, D: Db> {
|
struct TributaryMutable<N: Network, D: Db> {
|
||||||
// The following are actually mutably borrowed by Substrate as well.
|
// The following are actually mutably borrowed by Substrate as well.
|
||||||
// - Substrate triggers key gens, and determines which to use.
|
// - Substrate triggers key gens, and determines which to use.
|
||||||
// - SubstrateBlock events cause scheduling which causes signing.
|
// - SubstrateBlock events cause scheduling which causes signing.
|
||||||
@@ -148,8 +148,8 @@ struct TributaryMutable<C: Coin, D: Db> {
|
|||||||
// The only other note is how the scanner may cause a signer task to be dropped, effectively
|
// The only other note is how the scanner may cause a signer task to be dropped, effectively
|
||||||
// invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage
|
// invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage
|
||||||
// of a dropped task.
|
// of a dropped task.
|
||||||
key_gen: KeyGen<C, D>,
|
key_gen: KeyGen<N, D>,
|
||||||
signers: HashMap<Vec<u8>, Signer<C, D>>,
|
signers: HashMap<Vec<u8>, Signer<N, D>>,
|
||||||
|
|
||||||
// This is also mutably borrowed by the Scanner.
|
// This is also mutably borrowed by the Scanner.
|
||||||
// The Scanner starts new sign tasks.
|
// The Scanner starts new sign tasks.
|
||||||
@@ -164,7 +164,7 @@ struct TributaryMutable<C: Coin, D: Db> {
|
|||||||
// Items which are mutably borrowed by Substrate.
|
// Items which are mutably borrowed by Substrate.
|
||||||
// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't
|
// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't
|
||||||
// violated.
|
// violated.
|
||||||
struct SubstrateMutable<C: Coin, D: Db> {
|
struct SubstrateMutable<N: Network, D: Db> {
|
||||||
// The scanner is expected to autonomously operate, scanning blocks as they appear.
|
// The scanner is expected to autonomously operate, scanning blocks as they appear.
|
||||||
// When a block is sufficiently confirmed, the scanner mutates the signer to try and get a Batch
|
// When a block is sufficiently confirmed, the scanner mutates the signer to try and get a Batch
|
||||||
// signed.
|
// signed.
|
||||||
@@ -174,26 +174,26 @@ struct SubstrateMutable<C: Coin, D: Db> {
|
|||||||
// This can't be mutated as soon as a Batch is signed since the mutation which occurs then is
|
// This can't be mutated as soon as a Batch is signed since the mutation which occurs then is
|
||||||
// paired with the mutations caused by Burn events. Substrate's ordering determines if such a
|
// paired with the mutations caused by Burn events. Substrate's ordering determines if such a
|
||||||
// pairing exists.
|
// pairing exists.
|
||||||
scanner: ScannerHandle<C, D>,
|
scanner: ScannerHandle<N, D>,
|
||||||
|
|
||||||
// Schedulers take in new outputs, from the scanner, and payments, from Burn events on Substrate.
|
// Schedulers take in new outputs, from the scanner, and payments, from Burn events on Substrate.
|
||||||
// These are paired when possible, in the name of efficiency. Accordingly, both mutations must
|
// These are paired when possible, in the name of efficiency. Accordingly, both mutations must
|
||||||
// happen by Substrate.
|
// happen by Substrate.
|
||||||
schedulers: HashMap<Vec<u8>, Scheduler<C>>,
|
schedulers: HashMap<Vec<u8>, Scheduler<N>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn sign_plans<C: Coin, D: Db>(
|
async fn sign_plans<N: Network, D: Db>(
|
||||||
txn: &mut D::Transaction<'_>,
|
txn: &mut D::Transaction<'_>,
|
||||||
coin: &C,
|
network: &N,
|
||||||
substrate_mutable: &mut SubstrateMutable<C, D>,
|
substrate_mutable: &mut SubstrateMutable<N, D>,
|
||||||
signers: &mut HashMap<Vec<u8>, Signer<C, D>>,
|
signers: &mut HashMap<Vec<u8>, Signer<N, D>>,
|
||||||
context: SubstrateContext,
|
context: SubstrateContext,
|
||||||
plans: Vec<Plan<C>>,
|
plans: Vec<Plan<N>>,
|
||||||
) {
|
) {
|
||||||
let mut plans = VecDeque::from(plans);
|
let mut plans = VecDeque::from(plans);
|
||||||
|
|
||||||
let mut block_hash = <C::Block as Block<C>>::Id::default();
|
let mut block_hash = <N::Block as Block<N>>::Id::default();
|
||||||
block_hash.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0);
|
block_hash.as_mut().copy_from_slice(&context.network_latest_finalized_block.0);
|
||||||
// block_number call is safe since it unwraps
|
// block_number call is safe since it unwraps
|
||||||
let block_number = substrate_mutable
|
let block_number = substrate_mutable
|
||||||
.scanner
|
.scanner
|
||||||
@@ -201,16 +201,16 @@ async fn sign_plans<C: Coin, D: Db>(
|
|||||||
.await
|
.await
|
||||||
.expect("told to sign_plans on a context we're not synced to");
|
.expect("told to sign_plans on a context we're not synced to");
|
||||||
|
|
||||||
let fee = get_fee(coin, block_number).await;
|
let fee = get_fee(network, block_number).await;
|
||||||
|
|
||||||
while let Some(plan) = plans.pop_front() {
|
while let Some(plan) = plans.pop_front() {
|
||||||
let id = plan.id();
|
let id = plan.id();
|
||||||
info!("preparing plan {}: {:?}", hex::encode(id), plan);
|
info!("preparing plan {}: {:?}", hex::encode(id), plan);
|
||||||
|
|
||||||
let key = plan.key.to_bytes();
|
let key = plan.key.to_bytes();
|
||||||
MainDb::<C, D>::save_signing(txn, key.as_ref(), block_number.try_into().unwrap(), &plan);
|
MainDb::<N, D>::save_signing(txn, key.as_ref(), block_number.try_into().unwrap(), &plan);
|
||||||
let (tx, branches) =
|
let (tx, branches) =
|
||||||
prepare_send(coin, signers.get_mut(key.as_ref()).unwrap().keys(), block_number, fee, plan)
|
prepare_send(network, signers.get_mut(key.as_ref()).unwrap().keys(), block_number, fee, plan)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
for branch in branches {
|
for branch in branches {
|
||||||
@@ -228,17 +228,17 @@ async fn sign_plans<C: Coin, D: Db>(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
|
||||||
txn: &mut D::Transaction<'_>,
|
txn: &mut D::Transaction<'_>,
|
||||||
coin: &C,
|
network: &N,
|
||||||
coordinator: &mut Co,
|
coordinator: &mut Co,
|
||||||
tributary_mutable: &mut TributaryMutable<C, D>,
|
tributary_mutable: &mut TributaryMutable<N, D>,
|
||||||
substrate_mutable: &mut SubstrateMutable<C, D>,
|
substrate_mutable: &mut SubstrateMutable<N, D>,
|
||||||
msg: &Message,
|
msg: &Message,
|
||||||
) {
|
) {
|
||||||
// If this message expects a higher block number than we have, halt until synced
|
// If this message expects a higher block number than we have, halt until synced
|
||||||
async fn wait<C: Coin, D: Db>(scanner: &ScannerHandle<C, D>, block_hash: &BlockHash) {
|
async fn wait<N: Network, D: Db>(scanner: &ScannerHandle<N, D>, block_hash: &BlockHash) {
|
||||||
let mut needed_hash = <C::Block as Block<C>>::Id::default();
|
let mut needed_hash = <N::Block as Block<N>>::Id::default();
|
||||||
needed_hash.as_mut().copy_from_slice(&block_hash.0);
|
needed_hash.as_mut().copy_from_slice(&block_hash.0);
|
||||||
|
|
||||||
let block_number = loop {
|
let block_number = loop {
|
||||||
@@ -249,7 +249,7 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
warn!(
|
warn!(
|
||||||
"node is desynced. we haven't scanned {} which should happen after {} confirms",
|
"node is desynced. we haven't scanned {} which should happen after {} confirms",
|
||||||
hex::encode(&needed_hash),
|
hex::encode(&needed_hash),
|
||||||
C::CONFIRMATIONS,
|
N::CONFIRMATIONS,
|
||||||
);
|
);
|
||||||
sleep(Duration::from_secs(10)).await;
|
sleep(Duration::from_secs(10)).await;
|
||||||
continue;
|
continue;
|
||||||
@@ -272,11 +272,11 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
let synced = |context: &SubstrateContext, key| -> Result<(), ()> {
|
let synced = |context: &SubstrateContext, key| -> Result<(), ()> {
|
||||||
// Check that we've synced this block and can actually operate on it ourselves
|
// Check that we've synced this block and can actually operate on it ourselves
|
||||||
let latest = scanner.latest_scanned(key);
|
let latest = scanner.latest_scanned(key);
|
||||||
if usize::try_from(context.coin_latest_finalized_block).unwrap() < latest {
|
if usize::try_from(context.network_latest_finalized_block).unwrap() < latest {
|
||||||
log::warn!(
|
log::warn!(
|
||||||
"coin node disconnected/desynced from rest of the network. \
|
"external network node disconnected/desynced from rest of the network. \
|
||||||
our block: {latest:?}, network's acknowledged: {}",
|
our block: {latest:?}, network's acknowledged: {}",
|
||||||
context.coin_latest_finalized_block,
|
context.network_latest_finalized_block,
|
||||||
);
|
);
|
||||||
Err(())?;
|
Err(())?;
|
||||||
}
|
}
|
||||||
@@ -312,21 +312,21 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
CoordinatorMessage::Substrate(msg) => {
|
CoordinatorMessage::Substrate(msg) => {
|
||||||
match msg {
|
match msg {
|
||||||
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, set, key_pair } => {
|
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, set, key_pair } => {
|
||||||
// This is the first key pair for this coin so no block has been finalized yet
|
// This is the first key pair for this network so no block has been finalized yet
|
||||||
let activation_number = if context.coin_latest_finalized_block.0 == [0; 32] {
|
let activation_number = if context.network_latest_finalized_block.0 == [0; 32] {
|
||||||
assert!(tributary_mutable.signers.is_empty());
|
assert!(tributary_mutable.signers.is_empty());
|
||||||
assert!(tributary_mutable.substrate_signers.is_empty());
|
assert!(tributary_mutable.substrate_signers.is_empty());
|
||||||
assert!(substrate_mutable.schedulers.is_empty());
|
assert!(substrate_mutable.schedulers.is_empty());
|
||||||
|
|
||||||
// Wait until a coin's block's time exceeds Serai's time
|
// Wait until a network's block's time exceeds Serai's time
|
||||||
// TODO: This assumes the coin has a monotonic clock for its blocks' times, which
|
// TODO: This assumes the network has a monotonic clock for its blocks' times, which
|
||||||
// isn't a viable assumption
|
// isn't a viable assumption
|
||||||
|
|
||||||
// If the latest block number is 10, then the block indexed by 1 has 10 confirms
|
// If the latest block number is 10, then the block indexed by 1 has 10 confirms
|
||||||
// 10 + 1 - 10 = 1
|
// 10 + 1 - 10 = 1
|
||||||
while get_block(
|
while get_block(
|
||||||
coin,
|
network,
|
||||||
(get_latest_block_number(coin).await + 1).saturating_sub(C::CONFIRMATIONS),
|
(get_latest_block_number(network).await + 1).saturating_sub(N::CONFIRMATIONS),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.time() <
|
.time() <
|
||||||
@@ -334,7 +334,7 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
{
|
{
|
||||||
info!(
|
info!(
|
||||||
"serai confirmed the first key pair for a set. {} {}",
|
"serai confirmed the first key pair for a set. {} {}",
|
||||||
"we're waiting for a coin's finalized block's time to exceed unix time ",
|
"we're waiting for a network's finalized block's time to exceed unix time ",
|
||||||
context.serai_time,
|
context.serai_time,
|
||||||
);
|
);
|
||||||
sleep(Duration::from_secs(5)).await;
|
sleep(Duration::from_secs(5)).await;
|
||||||
@@ -342,13 +342,13 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
|
|
||||||
// Find the first block to do so
|
// Find the first block to do so
|
||||||
let mut earliest =
|
let mut earliest =
|
||||||
(get_latest_block_number(coin).await + 1).saturating_sub(C::CONFIRMATIONS);
|
(get_latest_block_number(network).await + 1).saturating_sub(N::CONFIRMATIONS);
|
||||||
assert!(get_block(coin, earliest).await.time() >= context.serai_time);
|
assert!(get_block(network, earliest).await.time() >= context.serai_time);
|
||||||
// earliest > 0 prevents a panic if Serai creates keys before the genesis block
|
// earliest > 0 prevents a panic if Serai creates keys before the genesis block
|
||||||
// which... should be impossible
|
// which... should be impossible
|
||||||
// Yet a prevented panic is a prevented panic
|
// Yet a prevented panic is a prevented panic
|
||||||
while (earliest > 0) &&
|
while (earliest > 0) &&
|
||||||
(get_block(coin, earliest - 1).await.time() >= context.serai_time)
|
(get_block(network, earliest - 1).await.time() >= context.serai_time)
|
||||||
{
|
{
|
||||||
earliest -= 1;
|
earliest -= 1;
|
||||||
}
|
}
|
||||||
@@ -356,8 +356,8 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
// Use this as the activation block
|
// Use this as the activation block
|
||||||
earliest
|
earliest
|
||||||
} else {
|
} else {
|
||||||
let mut activation_block = <C::Block as Block<C>>::Id::default();
|
let mut activation_block = <N::Block as Block<N>>::Id::default();
|
||||||
activation_block.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0);
|
activation_block.as_mut().copy_from_slice(&context.network_latest_finalized_block.0);
|
||||||
// This block_number call is safe since it unwraps
|
// This block_number call is safe since it unwraps
|
||||||
substrate_mutable
|
substrate_mutable
|
||||||
.scanner
|
.scanner
|
||||||
@@ -369,38 +369,38 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
info!("activating {set:?}'s keys at {activation_number}");
|
info!("activating {set:?}'s keys at {activation_number}");
|
||||||
|
|
||||||
// See TributaryMutable's struct definition for why this block is safe
|
// See TributaryMutable's struct definition for why this block is safe
|
||||||
let KeyConfirmed { substrate_keys, coin_keys } =
|
let KeyConfirmed { substrate_keys, network_keys } =
|
||||||
tributary_mutable.key_gen.confirm(txn, set, key_pair).await;
|
tributary_mutable.key_gen.confirm(txn, set, key_pair).await;
|
||||||
tributary_mutable.substrate_signers.insert(
|
tributary_mutable.substrate_signers.insert(
|
||||||
substrate_keys.group_key().to_bytes().to_vec(),
|
substrate_keys.group_key().to_bytes().to_vec(),
|
||||||
SubstrateSigner::new(substrate_keys),
|
SubstrateSigner::new(substrate_keys),
|
||||||
);
|
);
|
||||||
|
|
||||||
let key = coin_keys.group_key();
|
let key = network_keys.group_key();
|
||||||
|
|
||||||
substrate_mutable.scanner.rotate_key(txn, activation_number, key).await;
|
substrate_mutable.scanner.rotate_key(txn, activation_number, key).await;
|
||||||
substrate_mutable
|
substrate_mutable
|
||||||
.schedulers
|
.schedulers
|
||||||
.insert(key.to_bytes().as_ref().to_vec(), Scheduler::<C>::new::<D>(txn, key));
|
.insert(key.to_bytes().as_ref().to_vec(), Scheduler::<N>::new::<D>(txn, key));
|
||||||
|
|
||||||
tributary_mutable
|
tributary_mutable
|
||||||
.signers
|
.signers
|
||||||
.insert(key.to_bytes().as_ref().to_vec(), Signer::new(coin.clone(), coin_keys));
|
.insert(key.to_bytes().as_ref().to_vec(), Signer::new(network.clone(), network_keys));
|
||||||
}
|
}
|
||||||
|
|
||||||
messages::substrate::CoordinatorMessage::SubstrateBlock {
|
messages::substrate::CoordinatorMessage::SubstrateBlock {
|
||||||
context,
|
context,
|
||||||
network,
|
network: network_id,
|
||||||
block,
|
block,
|
||||||
key: key_vec,
|
key: key_vec,
|
||||||
burns,
|
burns,
|
||||||
} => {
|
} => {
|
||||||
assert_eq!(network, C::NETWORK);
|
assert_eq!(network_id, N::NETWORK, "coordinator sent us data for another network");
|
||||||
|
|
||||||
let mut block_id = <C::Block as Block<C>>::Id::default();
|
let mut block_id = <N::Block as Block<N>>::Id::default();
|
||||||
block_id.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0);
|
block_id.as_mut().copy_from_slice(&context.network_latest_finalized_block.0);
|
||||||
|
|
||||||
let key = <C::Curve as Ciphersuite>::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap();
|
let key = <N::Curve as Ciphersuite>::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap();
|
||||||
|
|
||||||
// We now have to acknowledge every block for this key up to the acknowledged block
|
// We now have to acknowledge every block for this key up to the acknowledged block
|
||||||
let (blocks, outputs) =
|
let (blocks, outputs) =
|
||||||
@@ -418,9 +418,9 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
instruction: OutInstruction { address, data },
|
instruction: OutInstruction { address, data },
|
||||||
balance,
|
balance,
|
||||||
} = out;
|
} = out;
|
||||||
assert_eq!(balance.coin.network(), C::NETWORK);
|
assert_eq!(balance.coin.network(), N::NETWORK);
|
||||||
|
|
||||||
if let Ok(address) = C::Address::try_from(address.consume()) {
|
if let Ok(address) = N::Address::try_from(address.consume()) {
|
||||||
// TODO: Add coin to payment
|
// TODO: Add coin to payment
|
||||||
payments.push(Payment {
|
payments.push(Payment {
|
||||||
address,
|
address,
|
||||||
@@ -439,7 +439,7 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
coordinator
|
coordinator
|
||||||
.send(ProcessorMessage::Coordinator(
|
.send(ProcessorMessage::Coordinator(
|
||||||
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
|
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
|
||||||
network,
|
network: N::NETWORK,
|
||||||
block,
|
block,
|
||||||
plans: plans.iter().map(|plan| plan.id()).collect(),
|
plans: plans.iter().map(|plan| plan.id()).collect(),
|
||||||
},
|
},
|
||||||
@@ -448,7 +448,7 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
|
|
||||||
sign_plans(
|
sign_plans(
|
||||||
txn,
|
txn,
|
||||||
coin,
|
network,
|
||||||
substrate_mutable,
|
substrate_mutable,
|
||||||
// See commentary in TributaryMutable for why this is safe
|
// See commentary in TributaryMutable for why this is safe
|
||||||
&mut tributary_mutable.signers,
|
&mut tributary_mutable.signers,
|
||||||
@@ -462,10 +462,10 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn boot<C: Coin, D: Db>(
|
async fn boot<N: Network, D: Db>(
|
||||||
raw_db: &mut D,
|
raw_db: &mut D,
|
||||||
coin: &C,
|
network: &N,
|
||||||
) -> (MainDb<C, D>, TributaryMutable<C, D>, SubstrateMutable<C, D>) {
|
) -> (MainDb<N, D>, TributaryMutable<N, D>, SubstrateMutable<N, D>) {
|
||||||
let mut entropy_transcript = {
|
let mut entropy_transcript = {
|
||||||
let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified"));
|
let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified"));
|
||||||
if entropy.len() != 64 {
|
if entropy.len() != 64 {
|
||||||
@@ -494,11 +494,11 @@ async fn boot<C: Coin, D: Db>(
|
|||||||
|
|
||||||
// We don't need to re-issue GenerateKey orders because the coordinator is expected to
|
// We don't need to re-issue GenerateKey orders because the coordinator is expected to
|
||||||
// schedule/notify us of new attempts
|
// schedule/notify us of new attempts
|
||||||
-let key_gen = KeyGen::<C, _>::new(raw_db.clone(), entropy(b"key-gen_entropy"));
+let key_gen = KeyGen::<N, _>::new(raw_db.clone(), entropy(b"key-gen_entropy"));
// The scanner has no long-standing orders to re-issue
-let (mut scanner, active_keys) = Scanner::new(coin.clone(), raw_db.clone());
+let (mut scanner, active_keys) = Scanner::new(network.clone(), raw_db.clone());

-let mut schedulers = HashMap::<Vec<u8>, Scheduler<C>>::new();
+let mut schedulers = HashMap::<Vec<u8>, Scheduler<N>>::new();
let mut substrate_signers = HashMap::new();
let mut signers = HashMap::new();

@@ -507,7 +507,7 @@ async fn boot<C: Coin, D: Db>(
for key in &active_keys {
schedulers.insert(key.to_bytes().as_ref().to_vec(), Scheduler::from_db(raw_db, *key).unwrap());

-let (substrate_keys, coin_keys) = key_gen.keys(key);
+let (substrate_keys, network_keys) = key_gen.keys(key);

let substrate_key = substrate_keys.group_key();
let substrate_signer = SubstrateSigner::new(substrate_keys);
@@ -515,25 +515,25 @@ async fn boot<C: Coin, D: Db>(
// necessary
substrate_signers.insert(substrate_key.to_bytes().to_vec(), substrate_signer);

-let mut signer = Signer::new(coin.clone(), coin_keys);
+let mut signer = Signer::new(network.clone(), network_keys);

// Load any TXs being actively signed
let key = key.to_bytes();
for (block_number, plan) in main_db.signing(key.as_ref()) {
let block_number = block_number.try_into().unwrap();

-let fee = get_fee(coin, block_number).await;
+let fee = get_fee(network, block_number).await;

let id = plan.id();
info!("reloading plan {}: {:?}", hex::encode(id), plan);

let (Some((tx, eventuality)), _) =
-prepare_send(coin, signer.keys(), block_number, fee, plan).await else {
+prepare_send(network, signer.keys(), block_number, fee, plan).await else {
panic!("previously created transaction is no longer being created")
};

scanner.register_eventuality(block_number, id, eventuality.clone()).await;
-// TODO: Reconsider if the Signer should have the eventuality, or if just the coin/scanner
+// TODO: Reconsider if the Signer should have the eventuality, or if just the network/scanner
// should
let mut txn = raw_db.txn();
signer.sign_transaction(&mut txn, id, tx, eventuality).await;
@@ -551,14 +551,15 @@ async fn boot<C: Coin, D: Db>(
)
}

-async fn run<C: Coin, D: Db, Co: Coordinator>(mut raw_db: D, coin: C, mut coordinator: Co) {
+async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut coordinator: Co) {
// We currently expect a contextless bidirectional mapping between these two values
// (which is that any value of A can be interpreted as B and vice versa)
// While we can write a contextual mapping, we have yet to do so
-// This check ensures no coin which doesn't have a bidirectional mapping is defined
+// This check ensures no network which doesn't have a bidirectional mapping is defined
-assert_eq!(<C::Block as Block<C>>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len());
+assert_eq!(<N::Block as Block<N>>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len());

-let (mut main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &coin).await;
+let (mut main_db, mut tributary_mutable, mut substrate_mutable) =
+  boot(&mut raw_db, &network).await;

// We can't load this from the DB as we can't guarantee atomic increments with the ack function
let mut last_coordinator_msg = None;
@@ -625,7 +626,7 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(mut raw_db: D, coin: C, mut coordi
// Only handle this if we haven't already
if !main_db.handled_message(msg.id) {
let mut txn = raw_db.txn();
-MainDb::<C, D>::handle_message(&mut txn, msg.id);
+MainDb::<N, D>::handle_message(&mut txn, msg.id);

// This is isolated to better think about how its ordered, or rather, about how the other
// cases aren't ordered
@@ -639,7 +640,7 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(mut raw_db: D, coin: C, mut coordi
// references over the same data
handle_coordinator_msg(
&mut txn,
-&coin,
+&network,
&mut coordinator,
&mut tributary_mutable,
&mut substrate_mutable,
@@ -661,7 +662,7 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(mut raw_db: D, coin: C, mut coordi
block_hash.copy_from_slice(block.as_ref());

let batch = Batch {
-network: C::NETWORK,
+network: N::NETWORK,
id: batch,
block: BlockHash(block_hash),
instructions: outputs.iter().filter_map(|output| {
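The length assertion above is what later lets `run` turn a network block ID into a Serai `BlockHash` with a plain byte copy, as the `Batch` construction does. A minimal standalone sketch of that conversion, written as if inside the processor crate; the helper name is illustrative, not from this commit:

use serai_client::primitives::BlockHash;

// Illustrative helper only: any 32-byte block ID maps directly onto a BlockHash,
// which is exactly what the assert_eq! on lengths guarantees up front.
fn to_block_hash(id: &[u8]) -> BlockHash {
  assert_eq!(id.len(), 32, "network block IDs are expected to be 32 bytes");
  let mut block_hash = [0u8; 32];
  block_hash.copy_from_slice(id);
  BlockHash(block_hash)
}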
@@ -17,7 +17,7 @@ use bitcoin_serai::{
hashes::Hash as HashTrait,
consensus::{Encodable, Decodable},
script::Instruction,
-OutPoint, Transaction, Block, Network,
+OutPoint, Transaction, Block, Network as BitcoinNetwork,
},
wallet::{
tweak_keys, address, ReceivedOutput, Scanner, TransactionError,
@@ -38,13 +38,13 @@ use bitcoin_serai::bitcoin::{

use serai_client::{
primitives::{MAX_DATA_LEN, Coin as SeraiCoin, NetworkId, Amount, Balance},
-coins::bitcoin::Address,
+networks::bitcoin::Address,
};

use crate::{
-coins::{
+networks::{
-CoinError, Block as BlockTrait, OutputType, Output as OutputTrait,
+NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait,
-Transaction as TransactionTrait, Eventuality, EventualitiesTracker, PostFeeBranch, Coin,
+Transaction as TransactionTrait, Eventuality, EventualitiesTracker, PostFeeBranch, Network,
drop_branches, amortize_fee,
},
Plan,
@@ -144,13 +144,13 @@ impl TransactionTrait<Bitcoin> for Transaction {
buf
}
#[cfg(test)]
-async fn fee(&self, coin: &Bitcoin) -> u64 {
+async fn fee(&self, network: &Bitcoin) -> u64 {
let mut value = 0;
for input in &self.input {
let output = input.previous_output;
let mut hash = *output.txid.as_raw_hash().as_byte_array();
hash.reverse();
-value += coin.rpc.get_transaction(&hash).await.unwrap().output
+value += network.rpc.get_transaction(&hash).await.unwrap().output
[usize::try_from(output.vout).unwrap()]
.value;
}
@@ -280,7 +280,7 @@ impl Bitcoin {
}

#[async_trait]
-impl Coin for Bitcoin {
+impl Network for Bitcoin {
type Curve = Secp256k1;

type Fee = Fee;
@@ -326,7 +326,7 @@ impl Coin for Bitcoin {
}

fn address(key: ProjectivePoint) -> Address {
-Address(address(Network::Bitcoin, key).unwrap())
+Address(address(BitcoinNetwork::Bitcoin, key).unwrap())
}

fn branch_address(key: ProjectivePoint) -> Self::Address {
@@ -334,21 +334,21 @@ impl Coin for Bitcoin {
Self::address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))
}

-async fn get_latest_block_number(&self) -> Result<usize, CoinError> {
+async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
-self.rpc.get_latest_block_number().await.map_err(|_| CoinError::ConnectionError)
+self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError)
}

-async fn get_block(&self, number: usize) -> Result<Self::Block, CoinError> {
+async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {
let block_hash =
-self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?;
+self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?;
-self.rpc.get_block(&block_hash).await.map_err(|_| CoinError::ConnectionError)
+self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError)
}

async fn get_outputs(
&self,
block: &Self::Block,
key: ProjectivePoint,
-) -> Result<Vec<Self::Output>, CoinError> {
+) -> Result<Vec<Self::Output>, NetworkError> {
let (scanner, _, kinds) = scanner(key);

let mut outputs = vec![];
@@ -452,7 +452,8 @@ impl Coin for Bitcoin {
_: usize,
mut plan: Plan<Self>,
fee: Fee,
-) -> Result<(Option<(SignableTransaction, Self::Eventuality)>, Vec<PostFeeBranch>), CoinError> {
+) -> Result<(Option<(SignableTransaction, Self::Eventuality)>, Vec<PostFeeBranch>), NetworkError>
+{
let signable = |plan: &Plan<Self>, tx_fee: Option<_>| {
let mut payments = vec![];
for payment in &plan.payments {
@@ -521,7 +522,7 @@ impl Coin for Bitcoin {
async fn attempt_send(
&self,
transaction: Self::SignableTransaction,
-) -> Result<Self::TransactionMachine, CoinError> {
+) -> Result<Self::TransactionMachine, NetworkError> {
Ok(
transaction
.actual
@@ -531,10 +532,10 @@ impl Coin for Bitcoin {
)
}

-async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> {
+async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> {
match self.rpc.send_raw_transaction(tx).await {
Ok(_) => (),
-Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?,
+Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?,
// TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs
// invalid transaction
Err(e) => panic!("failed to publish TX {}: {e}", tx.txid()),
@@ -542,8 +543,8 @@ impl Coin for Bitcoin {
Ok(())
}

-async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, CoinError> {
+async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, NetworkError> {
-self.rpc.get_transaction(id).await.map_err(|_| CoinError::ConnectionError)
+self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError)
}

fn confirm_completion(&self, eventuality: &OutPoint, tx: &Transaction) -> bool {
@@ -566,7 +567,7 @@ impl Coin for Bitcoin {
.rpc
.rpc_call::<Vec<String>>(
"generatetoaddress",
-serde_json::json!([1, BAddress::p2sh(Script::empty(), Network::Regtest).unwrap()]),
+serde_json::json!([1, BAddress::p2sh(Script::empty(), BitcoinNetwork::Regtest).unwrap()]),
)
.await
.unwrap();
@@ -575,9 +576,9 @@ impl Coin for Bitcoin {
#[cfg(test)]
async fn test_send(&self, address: Self::Address) -> Block {
let secret_key = SecretKey::new(&mut rand_core::OsRng);
-let private_key = PrivateKey::new(secret_key, Network::Regtest);
+let private_key = PrivateKey::new(secret_key, BitcoinNetwork::Regtest);
let public_key = PublicKey::from_private_key(SECP256K1, &private_key);
-let main_addr = BAddress::p2pkh(&public_key, Network::Regtest);
+let main_addr = BAddress::p2pkh(&public_key, BitcoinNetwork::Regtest);

let new_block = self.get_latest_block_number().await.unwrap() + 1;
self
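With this file now importing the processor's own `Network` trait, the chain-parameter enum of the same name from `bitcoin_serai` is pulled in under an alias. A minimal standalone sketch of that aliasing pattern; the module and type names below are stand-ins, not the real crates:

mod bitcoin_lib {
  // Stand-in for bitcoin_serai::bitcoin::Network (chain parameters)
  #[derive(Clone, Copy, Debug)]
  pub enum Network { Mainnet, Regtest }
}

// Stand-in for the processor's own trait of the same name
trait Network { const ID: &'static str; }

// Alias the chain-parameter enum so both names can be used side by side
use bitcoin_lib::Network as BitcoinNetwork;

struct Bitcoin;
impl Network for Bitcoin {
  const ID: &'static str = "Bitcoin";
}

fn main() {
  println!("{} runs on {:?}", Bitcoin::ID, BitcoinNetwork::Regtest);
}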
@@ -25,8 +25,8 @@ pub use monero::Monero;
use crate::{Payment, Plan};

#[derive(Clone, Copy, Error, Debug)]
-pub enum CoinError {
+pub enum NetworkError {
-#[error("failed to connect to coin daemon")]
+#[error("failed to connect to network daemon")]
ConnectionError,
}

@@ -108,13 +108,13 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug {
}

#[async_trait]
-pub trait Transaction<C: Coin>: Send + Sync + Sized + Clone + Debug {
+pub trait Transaction<N: Network>: Send + Sync + Sized + Clone + Debug {
type Id: 'static + Id;
fn id(&self) -> Self::Id;
fn serialize(&self) -> Vec<u8>;

#[cfg(test)]
-async fn fee(&self, coin: &C) -> u64;
+async fn fee(&self, network: &N) -> u64;
}

pub trait Eventuality: Send + Sync + Clone + Debug {
@@ -171,13 +171,13 @@ impl<E: Eventuality> Default for EventualitiesTracker<E> {
}
}

-pub trait Block<C: Coin>: Send + Sync + Sized + Clone + Debug {
+pub trait Block<N: Network>: Send + Sync + Sized + Clone + Debug {
// This is currently bounded to being 32-bytes.
type Id: 'static + Id;
fn id(&self) -> Self::Id;
fn parent(&self) -> Self::Id;
fn time(&self) -> u64;
-fn median_fee(&self) -> C::Fee;
+fn median_fee(&self) -> N::Fee;
}

// The post-fee value of an expected branch.
@@ -187,10 +187,10 @@ pub struct PostFeeBranch {
}

// Return the PostFeeBranches needed when dropping a transaction
-pub fn drop_branches<C: Coin>(plan: &Plan<C>) -> Vec<PostFeeBranch> {
+pub fn drop_branches<N: Network>(plan: &Plan<N>) -> Vec<PostFeeBranch> {
let mut branch_outputs = vec![];
for payment in &plan.payments {
-if payment.address == C::branch_address(plan.key) {
+if payment.address == N::branch_address(plan.key) {
branch_outputs.push(PostFeeBranch { expected: payment.amount, actual: None });
}
}
@@ -198,7 +198,7 @@ pub fn drop_branches<C: Coin>(plan: &Plan<C>) -> Vec<PostFeeBranch> {
}

// Amortize a fee over the plan's payments
-pub fn amortize_fee<C: Coin>(plan: &mut Plan<C>, tx_fee: u64) -> Vec<PostFeeBranch> {
+pub fn amortize_fee<N: Network>(plan: &mut Plan<N>, tx_fee: u64) -> Vec<PostFeeBranch> {
// No payments to amortize over
if plan.payments.is_empty() {
return vec![];
@@ -211,11 +211,11 @@ pub fn amortize_fee<C: Coin>(plan: &mut Plan<C>, tx_fee: u64) -> Vec<PostFeeBran
// Use a formula which will round up
let per_output_fee = |payments| (tx_fee + (payments - 1)) / payments;

-let post_fee = |payment: &Payment<C>, per_output_fee| {
+let post_fee = |payment: &Payment<N>, per_output_fee| {
let mut post_fee = payment.amount.checked_sub(per_output_fee);
// If this is under our dust threshold, drop it
if let Some(amount) = post_fee {
-if amount < C::DUST {
+if amount < N::DUST {
post_fee = None;
}
}
@@ -244,7 +244,7 @@ pub fn amortize_fee<C: Coin>(plan: &mut Plan<C>, tx_fee: u64) -> Vec<PostFeeBran
for payment in plan.payments.iter_mut() {
let post_fee = post_fee(payment, per_output_fee);
// Note the branch output, if this is one
-if payment.address == C::branch_address(plan.key) {
+if payment.address == N::branch_address(plan.key) {
branch_outputs.push(PostFeeBranch { expected: payment.amount, actual: post_fee });
}
payment.amount = post_fee.unwrap_or(0);
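The per-output fee above is a ceiling division, `(tx_fee + (payments - 1)) / payments`, so the deductions always cover the whole fee, and any payment that falls under the dust threshold after the deduction is dropped. A standalone sketch of that arithmetic on plain `u64` values; the `DUST` constant here is an illustrative stand-in, not a real network's:

fn per_output_fee(tx_fee: u64, payments: u64) -> u64 {
  // Ceiling division: round up so the payments collectively cover the whole fee
  (tx_fee + (payments - 1)) / payments
}

fn main() {
  const DUST: u64 = 50; // illustrative threshold only
  let tx_fee = 10;
  let amounts = [1000u64, 700, 40];

  let share = per_output_fee(tx_fee, amounts.len() as u64);
  assert_eq!(share, 4); // 10 split over 3 outputs, rounded up

  let post_fee: Vec<Option<u64>> = amounts
    .iter()
    .map(|amount| amount.checked_sub(share).filter(|amount| *amount >= DUST))
    .collect();
  // The 40-value payment drops below DUST once the fee share is deducted
  assert_eq!(post_fee, vec![Some(996), Some(696), None]);
}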
@@ -260,21 +260,21 @@ pub fn amortize_fee<C: Coin>(plan: &mut Plan<C>, tx_fee: u64) -> Vec<PostFeeBran
}

#[async_trait]
-pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
+pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
-/// The elliptic curve used for this coin.
+/// The elliptic curve used for this network.
type Curve: Curve;

-/// The type representing the fee for this coin.
+/// The type representing the fee for this network.
// This should likely be a u64, wrapped in a type which implements appropriate fee logic.
type Fee: Copy;

-/// The type representing the transaction for this coin.
+/// The type representing the transaction for this network.
type Transaction: Transaction<Self>;
-/// The type representing the block for this coin.
+/// The type representing the block for this network.
type Block: Block<Self>;

/// The type containing all information on a scanned output.
-// This is almost certainly distinct from the coin's native output type.
+// This is almost certainly distinct from the network's native output type.
type Output: Output;
/// The type containing all information on a planned transaction, waiting to be signed.
type SignableTransaction: Send + Sync + Clone + Debug;
@@ -296,9 +296,9 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
+ TryInto<Vec<u8>>
+ TryFrom<Vec<u8>>;

-/// Network ID for this coin.
+/// Network ID for this network.
const NETWORK: NetworkId;
-/// String ID for this coin.
+/// String ID for this network.
const ID: &'static str;
/// The amount of confirmations required to consider a block 'final'.
const CONFIRMATIONS: usize;
@@ -314,7 +314,7 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
/// Minimum output value which will be handled.
const DUST: u64;

-/// Tweak keys for this coin.
+/// Tweak keys for this network.
fn tweak_keys(key: &mut ThresholdKeys<Self::Curve>);

/// Address for the given group key to receive external coins to.
@@ -324,15 +324,15 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
fn branch_address(key: <Self::Curve as Ciphersuite>::G) -> Self::Address;

/// Get the latest block's number.
-async fn get_latest_block_number(&self) -> Result<usize, CoinError>;
+async fn get_latest_block_number(&self) -> Result<usize, NetworkError>;
/// Get a block by its number.
-async fn get_block(&self, number: usize) -> Result<Self::Block, CoinError>;
+async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError>;
/// Get the outputs within a block for a specific key.
async fn get_outputs(
&self,
block: &Self::Block,
key: <Self::Curve as Ciphersuite>::G,
-) -> Result<Vec<Self::Output>, CoinError>;
+) -> Result<Vec<Self::Output>, NetworkError>;

/// Get the registered eventualities completed within this block, and any prior blocks which
/// registered eventualities may have been completed in.
@@ -353,23 +353,23 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
fee: Self::Fee,
) -> Result<
(Option<(Self::SignableTransaction, Self::Eventuality)>, Vec<PostFeeBranch>),
-CoinError
+NetworkError
>;

/// Attempt to sign a SignableTransaction.
async fn attempt_send(
&self,
transaction: Self::SignableTransaction,
-) -> Result<Self::TransactionMachine, CoinError>;
+) -> Result<Self::TransactionMachine, NetworkError>;

/// Publish a transaction.
-async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError>;
+async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>;

/// Get a transaction by its ID.
async fn get_transaction(
&self,
id: &<Self::Transaction as Transaction<Self>>::Id,
-) -> Result<Self::Transaction, CoinError>;
+) -> Result<Self::Transaction, NetworkError>;

/// Confirm a plan was completed by the specified transaction.
// This is allowed to take shortcuts.
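After the rename, code that was generic over `C: Coin` is generic over `N: Network`. A minimal sketch of a caller against the trait items above, written as if inside the processor crate; the helper name is illustrative, not from this commit:

use crate::networks::{Network, NetworkError};

// Illustrative helper: the newest block considered final, per the trait's
// get_latest_block_number method and CONFIRMATIONS constant.
async fn latest_final_block<N: Network>(network: &N) -> Result<usize, NetworkError> {
  let latest = network.get_latest_block_number().await?;
  // The latest block already has one confirmation, so subtract CONFIRMATIONS - 1
  Ok(latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)))
}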
@@ -20,7 +20,7 @@ use monero_serai::{
rpc::{RpcError, HttpRpc, Rpc},
wallet::{
ViewPair, Scanner,
-address::{Network, SubaddressIndex, AddressSpec},
+address::{Network as MoneroNetwork, SubaddressIndex, AddressSpec},
Fee, SpendableOutput, Change, Decoys, TransactionError,
SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine,
},
@@ -30,15 +30,15 @@ use tokio::time::sleep;

pub use serai_client::{
primitives::{MAX_DATA_LEN, Coin as SeraiCoin, NetworkId, Amount, Balance},
-coins::monero::Address,
+networks::monero::Address,
};

use crate::{
Payment, Plan, additional_key,
-coins::{
+networks::{
-CoinError, Block as BlockTrait, OutputType, Output as OutputTrait,
+NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait,
Transaction as TransactionTrait, Eventuality as EventualityTrait, EventualitiesTracker,
-PostFeeBranch, Coin, drop_branches, amortize_fee,
+PostFeeBranch, Network, drop_branches, amortize_fee,
},
};

@@ -179,7 +179,7 @@ impl Monero {

fn address_internal(spend: EdwardsPoint, subaddress: Option<SubaddressIndex>) -> Address {
Address::new(Self::view_pair(spend).address(
-Network::Mainnet,
+MoneroNetwork::Mainnet,
AddressSpec::Featured { subaddress, payment_id: None, guaranteed: true },
))
.unwrap()
@@ -205,12 +205,13 @@ impl Monero {

#[cfg(test)]
fn test_address() -> Address {
-Address::new(Self::test_view_pair().address(Network::Mainnet, AddressSpec::Standard)).unwrap()
+Address::new(Self::test_view_pair().address(MoneroNetwork::Mainnet, AddressSpec::Standard))
+  .unwrap()
}
}

#[async_trait]
-impl Coin for Monero {
+impl Network for Monero {
type Curve = Ed25519;

type Fee = Fee;
@@ -249,18 +250,20 @@ impl Coin for Monero {
Self::address_internal(key, BRANCH_SUBADDRESS)
}

-async fn get_latest_block_number(&self) -> Result<usize, CoinError> {
+async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
// Monero defines height as chain length, so subtract 1 for block number
-Ok(self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)? - 1)
+Ok(self.rpc.get_height().await.map_err(|_| NetworkError::ConnectionError)? - 1)
}

-async fn get_block(&self, number: usize) -> Result<Self::Block, CoinError> {
+async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {
Ok(
self
.rpc
-.get_block(self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?)
+.get_block(
+  self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?,
+)
.await
-.map_err(|_| CoinError::ConnectionError)?,
+.map_err(|_| NetworkError::ConnectionError)?,
)
}

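The height-versus-block-number convention above is worth spelling out: a chain of height H contains blocks numbered 0 through H - 1, so the latest block number is the reported height minus one. A tiny illustration with made-up values:

fn main() {
  let height = 3_000_000u64;            // chain length as reported by the daemon (illustrative)
  let latest_block_number = height - 1; // blocks are numbered 0 ..= height - 1
  assert_eq!(latest_block_number, 2_999_999);
}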
@@ -268,11 +271,11 @@ impl Coin for Monero {
&self,
block: &Block,
key: EdwardsPoint,
-) -> Result<Vec<Self::Output>, CoinError> {
+) -> Result<Vec<Self::Output>, NetworkError> {
let mut txs = Self::scanner(key)
.scan(&self.rpc, block)
.await
-.map_err(|_| CoinError::ConnectionError)?
+.map_err(|_| NetworkError::ConnectionError)?
.iter()
.filter_map(|outputs| Some(outputs.not_locked()).filter(|outputs| !outputs.is_empty()))
.collect::<Vec<_>>();
@@ -316,7 +319,7 @@ impl Coin for Monero {
}

async fn check_block(
-coin: &Monero,
+network: &Monero,
eventualities: &mut EventualitiesTracker<Eventuality>,
block: &Block,
res: &mut HashMap<[u8; 32], [u8; 32]>,
@@ -325,7 +328,7 @@ impl Coin for Monero {
let tx = {
let mut tx;
while {
-tx = coin.get_transaction(hash).await;
+tx = network.get_transaction(hash).await;
tx.is_err()
} {
log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap());
@@ -374,7 +377,7 @@ impl Coin for Monero {
block_number: usize,
mut plan: Plan<Self>,
fee: Fee,
-) -> Result<(Option<(SignableTransaction, Eventuality)>, Vec<PostFeeBranch>), CoinError> {
+) -> Result<(Option<(SignableTransaction, Eventuality)>, Vec<PostFeeBranch>), NetworkError> {
// Sanity check this has at least one output planned
assert!((!plan.payments.is_empty()) || plan.change.is_some());

@@ -397,7 +400,7 @@ impl Coin for Monero {
}

// Check a fork hasn't occurred which this processor hasn't been updated for
-assert_eq!(protocol, self.rpc.get_protocol().await.map_err(|_| CoinError::ConnectionError)?);
+assert_eq!(protocol, self.rpc.get_protocol().await.map_err(|_| NetworkError::ConnectionError)?);

let spendable_outputs = plan.inputs.iter().cloned().map(|input| input.0).collect::<Vec<_>>();

@@ -413,7 +416,7 @@ impl Coin for Monero {
&spendable_outputs,
)
.await
-.map_err(|_| CoinError::ConnectionError)
+.map_err(|_| NetworkError::ConnectionError)
.unwrap();

let inputs = spendable_outputs.into_iter().zip(decoys.into_iter()).collect::<Vec<_>>();
@@ -428,7 +431,7 @@ impl Coin for Monero {
plan.payments.push(Payment {
address: Address::new(
ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0))
-.address(Network::Mainnet, AddressSpec::Standard),
+.address(MoneroNetwork::Mainnet, AddressSpec::Standard),
)
.unwrap(),
amount: 0,
@@ -492,7 +495,7 @@ impl Coin for Monero {
}
TransactionError::RpcError(e) => {
log::error!("RpcError when preparing transaction: {e:?}");
-Err(CoinError::ConnectionError)
+Err(NetworkError::ConnectionError)
}
},
}
@@ -520,25 +523,25 @@ impl Coin for Monero {
async fn attempt_send(
&self,
transaction: SignableTransaction,
-) -> Result<Self::TransactionMachine, CoinError> {
+) -> Result<Self::TransactionMachine, NetworkError> {
match transaction.actual.clone().multisig(transaction.keys.clone(), transaction.transcript) {
Ok(machine) => Ok(machine),
Err(e) => panic!("failed to create a multisig machine for TX: {e}"),
}
}

-async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> {
+async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> {
match self.rpc.publish_transaction(tx).await {
Ok(_) => Ok(()),
-Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?,
+Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?,
// TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs
// invalid transaction
Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())),
}
}

-async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, CoinError> {
+async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, NetworkError> {
-self.rpc.get_transaction(*id).await.map_err(|_| CoinError::ConnectionError)
+self.rpc.get_transaction(*id).await.map_err(|_| NetworkError::ConnectionError)
}

fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool {
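Both backends collapse every RPC failure into the single `NetworkError::ConnectionError` variant via `map_err`, leaving retry policy to the caller. A standalone sketch of the pattern with stand-in error types (none of these names are the real crates'):

#[derive(Debug)]
enum NetworkError { ConnectionError }

#[derive(Debug)]
enum RpcError { ConnectionError, InvalidNode }

fn get_height() -> Result<usize, RpcError> {
  Err(RpcError::InvalidNode) // pretend the daemon misbehaved
}

fn latest_block_number() -> Result<usize, NetworkError> {
  // Whatever the underlying RPC error was, the processor only reports a connection failure
  Ok(get_height().map_err(|_| NetworkError::ConnectionError)? - 1)
}

fn main() {
  assert!(matches!(latest_block_number(), Err(NetworkError::ConnectionError)));
}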
@@ -4,16 +4,16 @@ use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::GroupEncoding;
use frost::curve::Ciphersuite;

-use crate::coins::{Output, Coin};
+use crate::networks::{Output, Network};

#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Payment<C: Coin> {
+pub struct Payment<N: Network> {
-pub address: C::Address,
+pub address: N::Address,
pub data: Option<Vec<u8>>,
pub amount: u64,
}

-impl<C: Coin> Payment<C> {
+impl<N: Network> Payment<N> {
pub fn transcript<T: Transcript>(&self, transcript: &mut T) {
transcript.domain_separate(b"payment");
transcript.append_message(b"address", self.address.to_string().as_bytes());
@@ -46,7 +46,7 @@ impl<C: Coin> Payment<C> {
reader.read_exact(&mut buf)?;
let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()];
reader.read_exact(&mut address)?;
-let address = C::Address::try_from(address)
+let address = N::Address::try_from(address)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid address"))?;

let mut buf = [0; 1];
@@ -70,13 +70,13 @@ impl<C: Coin> Payment<C> {
}

#[derive(Clone, PartialEq, Eq)]
-pub struct Plan<C: Coin> {
+pub struct Plan<N: Network> {
-pub key: <C::Curve as Ciphersuite>::G,
+pub key: <N::Curve as Ciphersuite>::G,
-pub inputs: Vec<C::Output>,
+pub inputs: Vec<N::Output>,
-pub payments: Vec<Payment<C>>,
+pub payments: Vec<Payment<N>>,
-pub change: Option<<C::Curve as Ciphersuite>::G>,
+pub change: Option<<N::Curve as Ciphersuite>::G>,
}
-impl<C: Coin> core::fmt::Debug for Plan<C> {
+impl<N: Network> core::fmt::Debug for Plan<N> {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
fmt
.debug_struct("Plan")
@@ -88,11 +88,11 @@ impl<C: Coin> core::fmt::Debug for Plan<C> {
}
}

-impl<C: Coin> Plan<C> {
+impl<N: Network> Plan<N> {
pub fn transcript(&self) -> RecommendedTranscript {
let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID");
transcript.domain_separate(b"meta");
-transcript.append_message(b"network", C::ID);
+transcript.append_message(b"network", N::ID);
transcript.append_message(b"key", self.key.to_bytes());

transcript.domain_separate(b"inputs");
@@ -141,24 +141,24 @@ impl<C: Coin> Plan<C> {
}

pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-let key = C::Curve::read_G(reader)?;
+let key = N::Curve::read_G(reader)?;

let mut inputs = vec![];
let mut buf = [0; 4];
reader.read_exact(&mut buf)?;
for _ in 0 .. u32::from_le_bytes(buf) {
-inputs.push(C::Output::read(reader)?);
+inputs.push(N::Output::read(reader)?);
}

let mut payments = vec![];
reader.read_exact(&mut buf)?;
for _ in 0 .. u32::from_le_bytes(buf) {
-payments.push(Payment::<C>::read(reader)?);
+payments.push(Payment::<N>::read(reader)?);
}

let mut buf = [0; 1];
reader.read_exact(&mut buf)?;
-let change = if buf[0] == 1 { Some(C::Curve::read_G(reader)?) } else { None };
+let change = if buf[0] == 1 { Some(N::Curve::read_G(reader)?) } else { None };

Ok(Plan { key, inputs, payments, change })
}
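`Plan::read` above follows one serialization convention throughout: a little-endian `u32` count, then that many items, plus a single tag byte for the optional change key. A standalone sketch of reading such a length-prefixed list from any `io::Read`; the element type is a bare `u64` purely for illustration:

use std::io::{self, Read};

fn read_u64_list<R: Read>(reader: &mut R) -> io::Result<Vec<u64>> {
  // Count prefix: u32, little-endian
  let mut buf = [0; 4];
  reader.read_exact(&mut buf)?;
  let len = u32::from_le_bytes(buf);

  // Then exactly `len` fixed-size elements
  let mut res = Vec::with_capacity(len as usize);
  for _ in 0 .. len {
    let mut item = [0; 8];
    reader.read_exact(&mut item)?;
    res.push(u64::from_le_bytes(item));
  }
  Ok(res)
}

fn main() -> io::Result<()> {
  let encoded =
    [2u32.to_le_bytes().to_vec(), 7u64.to_le_bytes().to_vec(), 9u64.to_le_bytes().to_vec()].concat();
  assert_eq!(read_u64_list(&mut encoded.as_slice())?, vec![7, 9]);
  Ok(())
}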
@@ -18,27 +18,27 @@ use serai_client::primitives::BlockHash;

use crate::{
Get, DbTxn, Db,
-coins::{Output, Transaction, EventualitiesTracker, Block, Coin},
+networks::{Output, Transaction, EventualitiesTracker, Block, Network},
};

#[derive(Clone, Debug)]
-pub enum ScannerEvent<C: Coin> {
+pub enum ScannerEvent<N: Network> {
// Block scanned
Block {
-key: <C::Curve as Ciphersuite>::G,
+key: <N::Curve as Ciphersuite>::G,
-block: <C::Block as Block<C>>::Id,
+block: <N::Block as Block<N>>::Id,
batch: u32,
-outputs: Vec<C::Output>,
+outputs: Vec<N::Output>,
},
// Eventuality completion found on-chain
-Completed([u8; 32], <C::Transaction as Transaction<C>>::Id),
+Completed([u8; 32], <N::Transaction as Transaction<N>>::Id),
}

-pub type ScannerEventChannel<C> = mpsc::UnboundedReceiver<ScannerEvent<C>>;
+pub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;

#[derive(Clone, Debug)]
-struct ScannerDb<C: Coin, D: Db>(PhantomData<C>, PhantomData<D>);
+struct ScannerDb<N: Network, D: Db>(PhantomData<N>, PhantomData<D>);
-impl<C: Coin, D: Db> ScannerDb<C, D> {
+impl<N: Network, D: Db> ScannerDb<N, D> {
fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
D::key(b"SCANNER", dst, key)
}
@@ -46,21 +46,21 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
fn block_key(number: usize) -> Vec<u8> {
Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes())
}
-fn block_number_key(id: &<C::Block as Block<C>>::Id) -> Vec<u8> {
+fn block_number_key(id: &<N::Block as Block<N>>::Id) -> Vec<u8> {
Self::scanner_key(b"block_number", id)
}
-fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<C::Block as Block<C>>::Id) {
+fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<N::Block as Block<N>>::Id) {
txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes());
txn.put(Self::block_key(number), id);
}
-fn block<G: Get>(getter: &G, number: usize) -> Option<<C::Block as Block<C>>::Id> {
+fn block<G: Get>(getter: &G, number: usize) -> Option<<N::Block as Block<N>>::Id> {
getter.get(Self::block_key(number)).map(|id| {
-let mut res = <C::Block as Block<C>>::Id::default();
+let mut res = <N::Block as Block<N>>::Id::default();
res.as_mut().copy_from_slice(&id);
res
})
}
-fn block_number<G: Get>(getter: &G, id: &<C::Block as Block<C>>::Id) -> Option<usize> {
+fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> Option<usize> {
getter
.get(Self::block_number_key(id))
.map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap())
@@ -69,7 +69,7 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
fn active_keys_key() -> Vec<u8> {
Self::scanner_key(b"active_keys", b"")
}
-fn add_active_key(txn: &mut D::Transaction<'_>, key: <C::Curve as Ciphersuite>::G) {
+fn add_active_key(txn: &mut D::Transaction<'_>, key: <N::Curve as Ciphersuite>::G) {
let mut keys = txn.get(Self::active_keys_key()).unwrap_or(vec![]);

let key_bytes = key.to_bytes();
@@ -90,7 +90,7 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
keys.extend(key_bytes.as_ref());
txn.put(Self::active_keys_key(), keys);
}
-fn active_keys<G: Get>(getter: &G) -> Vec<<C::Curve as Ciphersuite>::G> {
+fn active_keys<G: Get>(getter: &G) -> Vec<<N::Curve as Ciphersuite>::G> {
let bytes_vec = getter.get(Self::active_keys_key()).unwrap_or(vec![]);
let mut bytes: &[u8] = bytes_vec.as_ref();

@@ -100,35 +100,35 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
// Either are fine
let mut res = Vec::with_capacity(bytes.len() / 32);
while !bytes.is_empty() {
-res.push(C::Curve::read_G(&mut bytes).unwrap());
+res.push(N::Curve::read_G(&mut bytes).unwrap());
}
res
}

-fn seen_key(id: &<C::Output as Output>::Id) -> Vec<u8> {
+fn seen_key(id: &<N::Output as Output>::Id) -> Vec<u8> {
Self::scanner_key(b"seen", id)
}
-fn seen<G: Get>(getter: &G, id: &<C::Output as Output>::Id) -> bool {
+fn seen<G: Get>(getter: &G, id: &<N::Output as Output>::Id) -> bool {
getter.get(Self::seen_key(id)).is_some()
}

fn next_batch_key() -> Vec<u8> {
Self::scanner_key(b"next_batch", [])
}
-fn batch_key(key: &<C::Curve as Ciphersuite>::G, block: &<C::Block as Block<C>>::Id) -> Vec<u8> {
+fn batch_key(key: &<N::Curve as Ciphersuite>::G, block: &<N::Block as Block<N>>::Id) -> Vec<u8> {
Self::scanner_key(b"batch", [key.to_bytes().as_ref(), block.as_ref()].concat())
}
fn outputs_key(
-key: &<C::Curve as Ciphersuite>::G,
+key: &<N::Curve as Ciphersuite>::G,
-block: &<C::Block as Block<C>>::Id,
+block: &<N::Block as Block<N>>::Id,
) -> Vec<u8> {
Self::scanner_key(b"outputs", [key.to_bytes().as_ref(), block.as_ref()].concat())
}
fn save_outputs(
txn: &mut D::Transaction<'_>,
-key: &<C::Curve as Ciphersuite>::G,
+key: &<N::Curve as Ciphersuite>::G,
-block: &<C::Block as Block<C>>::Id,
+block: &<N::Block as Block<N>>::Id,
-outputs: &[C::Output],
+outputs: &[N::Output],
) -> u32 {
let batch_key = Self::batch_key(key, block);
if let Some(batch) = txn.get(batch_key) {
@@ -160,29 +160,29 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
}
fn outputs(
txn: &D::Transaction<'_>,
-key: &<C::Curve as Ciphersuite>::G,
+key: &<N::Curve as Ciphersuite>::G,
-block: &<C::Block as Block<C>>::Id,
+block: &<N::Block as Block<N>>::Id,
-) -> Option<Vec<C::Output>> {
+) -> Option<Vec<N::Output>> {
let bytes_vec = txn.get(Self::outputs_key(key, block))?;
let mut bytes: &[u8] = bytes_vec.as_ref();

let mut res = vec![];
while !bytes.is_empty() {
-res.push(C::Output::read(&mut bytes).unwrap());
+res.push(N::Output::read(&mut bytes).unwrap());
}
Some(res)
}

-fn scanned_block_key(key: &<C::Curve as Ciphersuite>::G) -> Vec<u8> {
+fn scanned_block_key(key: &<N::Curve as Ciphersuite>::G) -> Vec<u8> {
Self::scanner_key(b"scanned_block", key.to_bytes())
}

#[allow(clippy::type_complexity)]
fn save_scanned_block(
txn: &mut D::Transaction<'_>,
-key: &<C::Curve as Ciphersuite>::G,
+key: &<N::Curve as Ciphersuite>::G,
block: usize,
-) -> (Option<<C::Block as Block<C>>::Id>, Vec<C::Output>) {
+) -> (Option<<N::Block as Block<N>>::Id>, Vec<N::Output>) {
let id = Self::block(txn, block); // It may be None for the first key rotated to
let outputs = if let Some(id) = id.as_ref() {
Self::outputs(txn, key, id).unwrap_or(vec![])
@@ -200,7 +200,7 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
// Return this block's outputs so they can be pruned from the RAM cache
(id, outputs)
}
-fn latest_scanned_block<G: Get>(getter: &G, key: <C::Curve as Ciphersuite>::G) -> usize {
+fn latest_scanned_block<G: Get>(getter: &G, key: <N::Curve as Ciphersuite>::G) -> usize {
let bytes = getter
.get(Self::scanned_block_key(&key))
.expect("asking for latest scanned block of key which wasn't rotated to");
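All of the scanner records above share one keying scheme: a fixed `b"SCANNER"` prefix, a per-table domain such as `block_id` or `outputs`, then the item key. A standalone sketch of that namespacing by concatenation; `db_key` is an illustrative stand-in for the real `D::key`:

fn db_key(domain: &'static [u8], dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
  // Prefix + domain + item key, concatenated into one byte key
  [domain, dst, key.as_ref()].concat()
}

fn block_key(number: u64) -> Vec<u8> {
  db_key(b"SCANNER", b"block_id", number.to_le_bytes())
}

fn main() {
  assert!(block_key(5).starts_with(b"SCANNER"));
  // Distinct domains keep the per-table entries from colliding
  assert_ne!(block_key(5), db_key(b"SCANNER", b"block_number", 5u64.to_le_bytes()));
}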
@@ -212,26 +212,26 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||||||
/// It WILL NOT fail to emit an event, even if it reboots at selected moments.
|
/// It WILL NOT fail to emit an event, even if it reboots at selected moments.
|
||||||
/// It MAY fire the same event multiple times.
|
/// It MAY fire the same event multiple times.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Scanner<C: Coin, D: Db> {
|
pub struct Scanner<N: Network, D: Db> {
|
||||||
coin: C,
|
network: N,
|
||||||
db: D,
|
db: D,
|
||||||
keys: Vec<<C::Curve as Ciphersuite>::G>,
|
keys: Vec<<N::Curve as Ciphersuite>::G>,
|
||||||
|
|
||||||
eventualities: EventualitiesTracker<C::Eventuality>,
|
eventualities: EventualitiesTracker<N::Eventuality>,
|
||||||
|
|
||||||
ram_scanned: HashMap<Vec<u8>, usize>,
|
ram_scanned: HashMap<Vec<u8>, usize>,
|
||||||
ram_outputs: HashSet<Vec<u8>>,
|
ram_outputs: HashSet<Vec<u8>>,
|
||||||
|
|
||||||
events: mpsc::UnboundedSender<ScannerEvent<C>>,
|
events: mpsc::UnboundedSender<ScannerEvent<N>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct ScannerHandle<C: Coin, D: Db> {
|
pub struct ScannerHandle<N: Network, D: Db> {
|
||||||
scanner: Arc<RwLock<Scanner<C, D>>>,
|
scanner: Arc<RwLock<Scanner<N, D>>>,
|
||||||
pub events: ScannerEventChannel<C>,
|
pub events: ScannerEventChannel<N>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
impl<N: Network, D: Db> ScannerHandle<N, D> {
|
||||||
pub async fn ram_scanned(&self) -> usize {
|
pub async fn ram_scanned(&self) -> usize {
|
||||||
let mut res = None;
|
let mut res = None;
|
||||||
for scanned in self.scanner.read().await.ram_scanned.values() {
|
for scanned in self.scanner.read().await.ram_scanned.values() {
|
||||||
@@ -249,7 +249,7 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
block_number: usize,
|
block_number: usize,
|
||||||
id: [u8; 32],
|
id: [u8; 32],
|
||||||
eventuality: C::Eventuality,
|
eventuality: N::Eventuality,
|
||||||
) {
|
) {
|
||||||
self.scanner.write().await.eventualities.register(block_number, id, eventuality)
|
self.scanner.write().await.eventualities.register(block_number, id, eventuality)
|
||||||
}
|
}
|
||||||
@@ -269,7 +269,7 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
txn: &mut D::Transaction<'_>,
|
txn: &mut D::Transaction<'_>,
|
||||||
activation_number: usize,
|
activation_number: usize,
|
||||||
key: <C::Curve as Ciphersuite>::G,
|
key: <N::Curve as Ciphersuite>::G,
|
||||||
) {
|
) {
|
||||||
let mut scanner = self.scanner.write().await;
|
if !scanner.keys.is_empty() {
@@ -280,41 +280,41 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {

info!("Rotating scanner to key {} at {activation_number}", hex::encode(key.to_bytes()));

-let (_, outputs) = ScannerDb::<C, D>::save_scanned_block(txn, &key, activation_number);
+let (_, outputs) = ScannerDb::<N, D>::save_scanned_block(txn, &key, activation_number);
scanner.ram_scanned.insert(key.to_bytes().as_ref().to_vec(), activation_number);
assert!(outputs.is_empty());

-ScannerDb::<C, D>::add_active_key(txn, key);
+ScannerDb::<N, D>::add_active_key(txn, key);
scanner.keys.push(key);
}

// This perform a database read which isn't safe with regards to if the value is set or not
// It may be set, when it isn't expected to be set, or not set, when it is expected to be set
// Since the value is static, if it's set, it's correctly set
-pub async fn block_number(&self, id: &<C::Block as Block<C>>::Id) -> Option<usize> {
+pub async fn block_number(&self, id: &<N::Block as Block<N>>::Id) -> Option<usize> {
-ScannerDb::<C, D>::block_number(&self.scanner.read().await.db, id)
+ScannerDb::<N, D>::block_number(&self.scanner.read().await.db, id)
}

/// Acknowledge having handled a block for a key.
pub async fn ack_up_to_block(
&mut self,
txn: &mut D::Transaction<'_>,
-key: <C::Curve as Ciphersuite>::G,
+key: <N::Curve as Ciphersuite>::G,
-id: <C::Block as Block<C>>::Id,
+id: <N::Block as Block<N>>::Id,
-) -> (Vec<BlockHash>, Vec<C::Output>) {
+) -> (Vec<BlockHash>, Vec<N::Output>) {
let mut scanner = self.scanner.write().await;
debug!("Block {} acknowledged", hex::encode(&id));

// Get the number for this block
-let number = ScannerDb::<C, D>::block_number(txn, &id)
+let number = ScannerDb::<N, D>::block_number(txn, &id)
.expect("main loop trying to operate on data we haven't scanned");
// Get the number of the last block we acknowledged
-let prior = ScannerDb::<C, D>::latest_scanned_block(txn, key);
+let prior = ScannerDb::<N, D>::latest_scanned_block(txn, key);

let mut blocks = vec![];
let mut outputs = vec![];
for number in (prior + 1) ..= number {
-let (block, these_outputs) = ScannerDb::<C, D>::save_scanned_block(txn, &key, number);
+let (block, these_outputs) = ScannerDb::<N, D>::save_scanned_block(txn, &key, number);
let block = BlockHash(block.unwrap().as_ref().try_into().unwrap());
blocks.push(block);
outputs.extend(these_outputs);
@@ -329,22 +329,22 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
}
}

-impl<C: Coin, D: Db> Scanner<C, D> {
+impl<N: Network, D: Db> Scanner<N, D> {
#[allow(clippy::new_ret_no_self)]
-pub fn new(coin: C, db: D) -> (ScannerHandle<C, D>, Vec<<C::Curve as Ciphersuite>::G>) {
+pub fn new(network: N, db: D) -> (ScannerHandle<N, D>, Vec<<N::Curve as Ciphersuite>::G>) {
let (events_send, events_recv) = mpsc::unbounded_channel();

-let keys = ScannerDb::<C, D>::active_keys(&db);
+let keys = ScannerDb::<N, D>::active_keys(&db);
let mut ram_scanned = HashMap::new();
for key in keys.clone() {
ram_scanned.insert(
key.to_bytes().as_ref().to_vec(),
-ScannerDb::<C, D>::latest_scanned_block(&db, key),
+ScannerDb::<N, D>::latest_scanned_block(&db, key),
);
}

let scanner = Arc::new(RwLock::new(Scanner {
-coin,
+network,
db,
keys: keys.clone(),

@@ -360,7 +360,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
(ScannerHandle { scanner, events: events_recv }, keys)
}

-fn emit(&mut self, event: ScannerEvent<C>) -> bool {
+fn emit(&mut self, event: ScannerEvent<N>) -> bool {
if self.events.send(event).is_err() {
info!("Scanner handler was dropped. Shutting down?");
return false;
@@ -377,11 +377,11 @@ impl<C: Coin, D: Db> Scanner<C, D> {
// Scan new blocks
{
let mut scanner = scanner.write().await;
-let latest = scanner.coin.get_latest_block_number().await;
+let latest = scanner.network.get_latest_block_number().await;
let latest = match latest {
// Only scan confirmed blocks, which we consider effectively finalized
// CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm
-Ok(latest) => latest.saturating_sub(C::CONFIRMATIONS.saturating_sub(1)),
+Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)),
Err(_) => {
warn!("couldn't get latest block number");
sleep(Duration::from_secs(60)).await;
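The cut-off above is plain arithmetic: the chain tip already carries one confirmation, so only blocks at or below latest - (CONFIRMATIONS - 1) are treated as finalized and scanned. A minimal sketch of that rule, with a made-up CONFIRMATIONS constant standing in for the network's N::CONFIRMATIONS associated constant:

// Placeholder for the network's N::CONFIRMATIONS associated constant.
const CONFIRMATIONS: usize = 6;

// Highest block number the scanner will treat as finalized for a given tip.
fn scannable_tip(latest: usize) -> usize {
  // The tip itself already has one confirmation, hence CONFIRMATIONS - 1.
  latest.saturating_sub(CONFIRMATIONS.saturating_sub(1))
}

fn main() {
  // With the tip at height 100, block 95 is the deepest block with exactly
  // six confirmations, so scanning stops there.
  assert_eq!(scannable_tip(100), 95);
  // A chain younger than the confirmation window saturates to 0 instead of
  // underflowing.
  assert_eq!(scannable_tip(3), 0);
}

saturating_sub keeps a chain shorter than the confirmation window from underflowing to a bogus height, which is the same reason the real code uses it.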
@@ -396,7 +396,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
for i in (latest_scanned + 1) ..= latest {
// TODO2: Check for key deprecation

-let block = match scanner.coin.get_block(i).await {
+let block = match scanner.network.get_block(i).await {
Ok(block) => block,
Err(_) => {
warn!("couldn't get block {i}");
@@ -409,14 +409,14 @@ impl<C: Coin, D: Db> Scanner<C, D> {
// only written to/read by this thread
// There's also no error caused by them being unexpectedly written (if the commit is
// made and then the processor suddenly reboots)
-if let Some(id) = ScannerDb::<C, D>::block(&scanner.db, i) {
+if let Some(id) = ScannerDb::<N, D>::block(&scanner.db, i) {
if id != block_id {
panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id));
}
} else {
info!("Found new block: {}", hex::encode(&block_id));

-if let Some(id) = ScannerDb::<C, D>::block(&scanner.db, i.saturating_sub(1)) {
+if let Some(id) = ScannerDb::<N, D>::block(&scanner.db, i.saturating_sub(1)) {
if id != block.parent() {
panic!(
"block {} doesn't build off expected parent {}",
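Those two panics are the scanner's entire reorg defence: a block number that was already saved must resolve to the same hash, and a new block must name the previously saved hash as its parent. A self-contained sketch of the same invariant over an in-memory map; SavedBlocks and check_and_save are illustrative stand-ins, not the ScannerDb API:

use std::collections::HashMap;

// Illustrative stand-in for the scanner's saved block hashes; not ScannerDb.
struct SavedBlocks(HashMap<usize, [u8; 32]>);

impl SavedBlocks {
  fn check_and_save(&mut self, number: usize, id: [u8; 32], parent: [u8; 32]) {
    if let Some(saved) = self.0.get(&number) {
      // A block we already treated as finalized may never change.
      assert_eq!(*saved, id, "reorg'd from a finalized block");
      return;
    }
    // A new block must build off the hash we saved for its parent height.
    if let Some(prev) = self.0.get(&number.saturating_sub(1)) {
      assert_eq!(*prev, parent, "block doesn't build off expected parent");
    }
    self.0.insert(number, id);
  }
}

fn main() {
  let mut saved = SavedBlocks(HashMap::new());
  saved.check_and_save(1, [1; 32], [0; 32]);
  // Block 2 must name block 1's hash as its parent.
  saved.check_and_save(2, [2; 32], [1; 32]);
}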
@@ -427,15 +427,16 @@ impl<C: Coin, D: Db> Scanner<C, D> {
}

let mut txn = scanner.db.txn();
-ScannerDb::<C, D>::save_block(&mut txn, i, &block_id);
+ScannerDb::<N, D>::save_block(&mut txn, i, &block_id);
txn.commit();
}

-// Clone coin because we can't borrow it while also mutably borrowing the eventualities
-// Thankfully, coin is written to be a cheap clone
-let coin = scanner.coin.clone();
+// Clone network because we can't borrow it while also mutably borrowing the
+// eventualities
+// Thankfully, network is written to be a cheap clone
+let network = scanner.network.clone();
for (id, tx) in
-coin.get_eventuality_completions(&mut scanner.eventualities, &block).await
+network.get_eventuality_completions(&mut scanner.eventualities, &block).await
{
// This should only happen if there's a P2P net desync or there's a malicious
// validator
@@ -450,7 +451,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
}
}

-let outputs = match scanner.coin.get_outputs(&block, key).await {
+let outputs = match scanner.network.get_outputs(&block, key).await {
Ok(outputs) => outputs,
Err(_) => {
warn!("Couldn't scan block {i}");
@@ -499,7 +500,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {

TODO: Only update ram_outputs after committing the TXN in question.
*/
-let seen = ScannerDb::<C, D>::seen(&scanner.db, &id);
+let seen = ScannerDb::<N, D>::seen(&scanner.db, &id);
let id = id.as_ref().to_vec();
if seen || scanner.ram_outputs.contains(&id) {
panic!("scanned an output multiple times");
@@ -513,7 +514,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {

// Save the outputs to disk
let mut txn = scanner.db.txn();
-let batch = ScannerDb::<C, D>::save_outputs(&mut txn, &key, &block_id, &outputs);
+let batch = ScannerDb::<N, D>::save_outputs(&mut txn, &key, &block_id, &outputs);
txn.commit();

// Send all outputs
@@ -6,14 +6,14 @@ use std::{
use ciphersuite::{group::GroupEncoding, Ciphersuite};

use crate::{
-coins::{Output, Coin},
+networks::{Output, Network},
DbTxn, Db, Payment, Plan,
};

/// Stateless, deterministic output/payment manager.
#[derive(PartialEq, Eq, Debug)]
-pub struct Scheduler<C: Coin> {
+pub struct Scheduler<N: Network> {
-key: <C::Curve as Ciphersuite>::G,
+key: <N::Curve as Ciphersuite>::G,

// Serai, when it has more outputs expected than it can handle in a single tranaction, will
// schedule the outputs to be handled later. Immediately, it just creates additional outputs
@@ -31,22 +31,22 @@ pub struct Scheduler<C: Coin> {
// output actually has, and it'll be moved into plans
//
// TODO2: Consider edge case where branch/change isn't mined yet keys are deprecated
-queued_plans: HashMap<u64, VecDeque<Vec<Payment<C>>>>,
+queued_plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,
-plans: HashMap<u64, VecDeque<Vec<Payment<C>>>>,
+plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,

// UTXOs available
-utxos: Vec<C::Output>,
+utxos: Vec<N::Output>,

// Payments awaiting scheduling due to the output availability problem
-payments: VecDeque<Payment<C>>,
+payments: VecDeque<Payment<N>>,
}

fn scheduler_key<D: Db, G: GroupEncoding>(key: &G) -> Vec<u8> {
D::key(b"SCHEDULER", b"scheduler", key.to_bytes())
}

-impl<C: Coin> Scheduler<C> {
+impl<N: Network> Scheduler<N> {
-fn read<R: Read>(key: <C::Curve as Ciphersuite>::G, reader: &mut R) -> io::Result<Self> {
+fn read<R: Read>(key: <N::Curve as Ciphersuite>::G, reader: &mut R) -> io::Result<Self> {
let mut read_plans = || -> io::Result<_> {
let mut all_plans = HashMap::new();
let mut all_plans_len = [0; 4];
@@ -80,7 +80,7 @@ impl<C: Coin> Scheduler<C> {
let mut utxos_len = [0; 4];
reader.read_exact(&mut utxos_len)?;
for _ in 0 .. u32::from_le_bytes(utxos_len) {
-utxos.push(C::Output::read(reader)?);
+utxos.push(N::Output::read(reader)?);
}

let mut payments = VecDeque::new();
@@ -99,7 +99,7 @@ impl<C: Coin> Scheduler<C> {
fn serialize(&self) -> Vec<u8> {
let mut res = Vec::with_capacity(4096);

-let mut write_plans = |plans: &HashMap<u64, VecDeque<Vec<Payment<C>>>>| {
+let mut write_plans = |plans: &HashMap<u64, VecDeque<Vec<Payment<N>>>>| {
res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes());
for (amount, list_of_plans) in plans {
res.extend(amount.to_le_bytes());
@@ -129,7 +129,7 @@ impl<C: Coin> Scheduler<C> {
res
}

-pub fn new<D: Db>(txn: &mut D::Transaction<'_>, key: <C::Curve as Ciphersuite>::G) -> Self {
+pub fn new<D: Db>(txn: &mut D::Transaction<'_>, key: <N::Curve as Ciphersuite>::G) -> Self {
let res = Scheduler {
key,
queued_plans: HashMap::new(),
@@ -142,7 +142,7 @@ impl<C: Coin> Scheduler<C> {
res
}

-pub fn from_db<D: Db>(db: &D, key: <C::Curve as Ciphersuite>::G) -> io::Result<Self> {
+pub fn from_db<D: Db>(db: &D, key: <N::Curve as Ciphersuite>::G) -> io::Result<Self> {
let scheduler = db.get(scheduler_key::<D, _>(&key)).unwrap_or_else(|| {
panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes()))
});
@@ -152,10 +152,10 @@ impl<C: Coin> Scheduler<C> {
Self::read(key, reader)
}

-fn execute(&mut self, inputs: Vec<C::Output>, mut payments: Vec<Payment<C>>) -> Plan<C> {
+fn execute(&mut self, inputs: Vec<N::Output>, mut payments: Vec<Payment<N>>) -> Plan<N> {
-// This must be equal to plan.key due to how coins detect they created outputs which are to
+// This must be equal to plan.key due to how networks detect they created outputs which are to
// the branch address
-let branch_address = C::branch_address(self.key);
+let branch_address = N::branch_address(self.key);
// created_output will be called any time we send to a branch address
// If it's called, and it wasn't expecting to be called, that's almost certainly an error
// The only way it wouldn't be is if someone on Serai triggered a burn to a branch, which is
@@ -166,10 +166,10 @@ impl<C: Coin> Scheduler<C> {
payments.drain(..).filter(|payment| payment.address != branch_address).collect::<Vec<_>>();

let mut change = false;
-let mut max = C::MAX_OUTPUTS;
+let mut max = N::MAX_OUTPUTS;

let payment_amounts =
-|payments: &Vec<Payment<C>>| payments.iter().map(|payment| payment.amount).sum::<u64>();
+|payments: &Vec<Payment<N>>| payments.iter().map(|payment| payment.amount).sum::<u64>();

// Requires a change output
if inputs.iter().map(Output::amount).sum::<u64>() != payment_amounts(&payments) {
@@ -192,9 +192,9 @@ impl<C: Coin> Scheduler<C> {
// If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves
while payments.len() > max {
// The resulting TX will have the remaining payments and a new branch payment
-let to_remove = (payments.len() + 1) - C::MAX_OUTPUTS;
+let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS;
// Don't remove more than possible
-let to_remove = to_remove.min(C::MAX_OUTPUTS);
+let to_remove = to_remove.min(N::MAX_OUTPUTS);

// Create the plan
let removed = payments.drain((payments.len() - to_remove) ..).collect::<Vec<_>>();
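To make the folding loop concrete: whenever more than MAX_OUTPUTS payments are pending, the tail of the list is cut off and replaced by a single branch payment carrying its total value, with the cut-off payments queued to be paid by that branch later (see queued_plans above). The sketch below models only that arithmetic with plain amounts and a made-up MAX_OUTPUTS; the real Scheduler also accounts for a change output and queues the removed payments rather than just summing them in place.

const MAX_OUTPUTS: usize = 16;

// Fold trailing payments into single "branch" amounts until the remaining
// list fits into one transaction. Amounts only; no change handling.
fn fold_into_branches(mut payments: Vec<u64>) -> (Vec<u64>, Vec<Vec<u64>>) {
  let mut branches = vec![];
  while payments.len() > MAX_OUTPUTS {
    // The resulting TX has the remaining payments plus one new branch payment.
    let to_remove = (payments.len() + 1) - MAX_OUTPUTS;
    // A branch is itself a TX, so it can't carry more than MAX_OUTPUTS either.
    let to_remove = to_remove.min(MAX_OUTPUTS);
    let removed: Vec<u64> = payments.drain((payments.len() - to_remove) ..).collect();
    // The branch payment carries the total value of everything folded into it.
    payments.push(removed.iter().sum());
    branches.push(removed);
  }
  (payments, branches)
}

fn main() {
  // 18 unit payments: 3 get folded into one branch, leaving 15 + 1 = 16 outputs.
  let (top, branches) = fold_into_branches(vec![1; 18]);
  assert_eq!(top.len(), 16);
  assert_eq!(top[15], 3);
  assert_eq!(branches, vec![vec![1u64; 3]]);
}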
@@ -211,7 +211,7 @@ impl<C: Coin> Scheduler<C> {
Plan { key: self.key, inputs, payments, change: Some(self.key).filter(|_| change) }
}

-fn add_outputs(&mut self, mut utxos: Vec<C::Output>) -> Vec<Plan<C>> {
+fn add_outputs(&mut self, mut utxos: Vec<N::Output>) -> Vec<Plan<N>> {
log::info!("adding {} outputs", utxos.len());

let mut txs = vec![];
@@ -247,9 +247,9 @@ impl<C: Coin> Scheduler<C> {
pub fn schedule<D: Db>(
&mut self,
txn: &mut D::Transaction<'_>,
-utxos: Vec<C::Output>,
+utxos: Vec<N::Output>,
-payments: Vec<Payment<C>>,
+payments: Vec<Payment<N>>,
-) -> Vec<Plan<C>> {
+) -> Vec<Plan<N>> {
let mut plans = self.add_outputs(utxos);

log::info!("scheduling {} new payments", payments.len());
@@ -275,7 +275,7 @@ impl<C: Coin> Scheduler<C> {
// Since we do multiple aggregation TXs at once, this will execute in logarithmic time
let utxos = self.utxos.drain(..).collect::<Vec<_>>();
let mut utxo_chunks =
-utxos.chunks(C::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::<Vec<_>>();
+utxos.chunks(N::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::<Vec<_>>();

// Use the first chunk for any scheduled payments, since it has the most value
let utxos = utxo_chunks.remove(0);
@@ -294,7 +294,7 @@ impl<C: Coin> Scheduler<C> {
// TODO: While payments have their TXs' fees deducted from themselves, that doesn't hold here
// We need to charge a fee before reporting incoming UTXOs to Substrate to cover aggregation
// TXs
-log::debug!("aggregating a chunk of {} inputs", C::MAX_INPUTS);
+log::debug!("aggregating a chunk of {} inputs", N::MAX_INPUTS);
plans.push(Plan { key: self.key, inputs: chunk, payments: vec![], change: Some(self.key) })
}

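The aggregation step above is a straightforward split: available UTXOs are chunked into groups of at most MAX_INPUTS, the first chunk (which the comments note holds the most value) funds the scheduled payments, and every remaining chunk becomes an inputs-only self-send plan. A sketch of that split with a placeholder MAX_INPUTS and integer amounts standing in for outputs:

const MAX_INPUTS: usize = 128;

// Split available UTXO amounts into the chunk spent on payments and the
// chunks that only get aggregated back to our own key.
fn split_utxos(utxos: Vec<u64>) -> (Vec<u64>, Vec<Vec<u64>>) {
  let mut chunks: Vec<Vec<u64>> =
    utxos.chunks(MAX_INPUTS).map(|chunk| chunk.to_vec()).collect();
  let for_payments = chunks.remove(0);
  (for_payments, chunks)
}

fn main() {
  let utxos: Vec<u64> = (0 .. 300).collect();
  let (for_payments, to_aggregate) = split_utxos(utxos);
  assert_eq!(for_payments.len(), 128);
  // 300 = 128 + 128 + 44, so two aggregation-only plans remain.
  assert_eq!(to_aggregate.len(), 2);
  assert_eq!(to_aggregate[1].len(), 44);
}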
@@ -303,7 +303,7 @@ impl<C: Coin> Scheduler<C> {

// If we can't fulfill the next payment, we have encountered an instance of the UTXO
// availability problem
-// This shows up in coins like Monero, where because we spent outputs, our change has yet to
+// This shows up in networks like Monero, where because we spent outputs, our change has yet to
// re-appear. Since it has yet to re-appear, we only operate with a balance which is a subset
// of our total balance
// Despite this, we may be order to fulfill a payment which is our total balance
@@ -369,7 +369,7 @@ impl<C: Coin> Scheduler<C> {
};

// Amortize the fee amongst all payments
-// While some coins, like Ethereum, may have some payments take notably more gas, those
+// While some networks, like Ethereum, may have some payments take notably more gas, those
// payments will have their own gas deducted when they're created. The difference in output
// value present here is solely the cost of the branch, which is used for all of these
// payments, regardless of how much they'll end up costing
@@ -387,7 +387,7 @@ impl<C: Coin> Scheduler<C> {

// Drop payments now below the dust threshold
let payments =
-payments.drain(..).filter(|payment| payment.amount >= C::DUST).collect::<Vec<_>>();
+payments.drain(..).filter(|payment| payment.amount >= N::DUST).collect::<Vec<_>>();
// Sanity check this was done properly
assert!(actual >= payments.iter().map(|payment| payment.amount).sum::<u64>());
if payments.is_empty() {
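Between the last two hunks (the elided lines compute the actual per-payment deduction), the cost of the branch transaction is amortized across the payments it serves, and anything that then falls below the network's dust threshold is dropped. A hedged sketch of that shape with a placeholder DUST value; the real code's rounding and remainder handling is not reproduced here:

const DUST: u64 = 1_000;

// Spread a total fee evenly over the payments, then drop anything under DUST.
// The real Scheduler's rounding differs; this only shows the overall shape.
fn amortize_and_filter(mut payments: Vec<u64>, fee: u64) -> Vec<u64> {
  let per_payment = fee / (payments.len() as u64);
  for payment in payments.iter_mut() {
    *payment = payment.saturating_sub(per_payment);
  }
  payments.into_iter().filter(|amount| *amount >= DUST).collect()
}

fn main() {
  // A 3_000 fee across three payments removes 1_000 from each; the smallest
  // payment ends up under DUST and is dropped before planning.
  let payments = amortize_and_filter(vec![10_000, 5_000, 1_500], 3_000);
  assert_eq!(payments, vec![9_000u64, 4_000]);
}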
@@ -14,18 +14,18 @@ use log::{info, debug, warn, error};
use messages::sign::*;
use crate::{
Get, DbTxn, Db,
-coins::{Transaction, Eventuality, Coin},
+networks::{Transaction, Eventuality, Network},
};

#[derive(Debug)]
-pub enum SignerEvent<C: Coin> {
+pub enum SignerEvent<N: Network> {
-SignedTransaction { id: [u8; 32], tx: <C::Transaction as Transaction<C>>::Id },
+SignedTransaction { id: [u8; 32], tx: <N::Transaction as Transaction<N>>::Id },
ProcessorMessage(ProcessorMessage),
}

#[derive(Debug)]
-struct SignerDb<C: Coin, D: Db>(D, PhantomData<C>);
+struct SignerDb<N: Network, D: Db>(D, PhantomData<N>);
-impl<C: Coin, D: Db> SignerDb<C, D> {
+impl<N: Network, D: Db> SignerDb<N, D> {
fn sign_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
D::key(b"SIGNER", dst, key)
}
@@ -36,7 +36,7 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
fn complete(
txn: &mut D::Transaction<'_>,
id: [u8; 32],
-tx: &<C::Transaction as Transaction<C>>::Id,
+tx: &<N::Transaction as Transaction<N>>::Id,
) {
// Transactions can be completed by multiple signatures
// Save every solution in order to be robust
@@ -64,12 +64,12 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
fn eventuality_key(id: [u8; 32]) -> Vec<u8> {
Self::sign_key(b"eventuality", id)
}
-fn save_eventuality(txn: &mut D::Transaction<'_>, id: [u8; 32], eventuality: C::Eventuality) {
+fn save_eventuality(txn: &mut D::Transaction<'_>, id: [u8; 32], eventuality: N::Eventuality) {
txn.put(Self::eventuality_key(id), eventuality.serialize());
}
-fn eventuality<G: Get>(getter: &G, id: [u8; 32]) -> Option<C::Eventuality> {
+fn eventuality<G: Get>(getter: &G, id: [u8; 32]) -> Option<N::Eventuality> {
Some(
-C::Eventuality::read::<&[u8]>(&mut getter.get(Self::eventuality_key(id))?.as_ref()).unwrap(),
+N::Eventuality::read::<&[u8]>(&mut getter.get(Self::eventuality_key(id))?.as_ref()).unwrap(),
)
}

@@ -83,49 +83,49 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
getter.get(Self::attempt_key(id)).is_some()
}

-fn save_transaction(txn: &mut D::Transaction<'_>, tx: &C::Transaction) {
+fn save_transaction(txn: &mut D::Transaction<'_>, tx: &N::Transaction) {
txn.put(Self::sign_key(b"tx", tx.id()), tx.serialize());
}
}

-pub struct Signer<C: Coin, D: Db> {
+pub struct Signer<N: Network, D: Db> {
db: PhantomData<D>,

-coin: C,
+network: N,

-keys: ThresholdKeys<C::Curve>,
+keys: ThresholdKeys<N::Curve>,

-signable: HashMap<[u8; 32], C::SignableTransaction>,
+signable: HashMap<[u8; 32], N::SignableTransaction>,
attempt: HashMap<[u8; 32], u32>,
-preprocessing: HashMap<[u8; 32], <C::TransactionMachine as PreprocessMachine>::SignMachine>,
+preprocessing: HashMap<[u8; 32], <N::TransactionMachine as PreprocessMachine>::SignMachine>,
#[allow(clippy::type_complexity)]
signing: HashMap<
[u8; 32],
<
-<C::TransactionMachine as PreprocessMachine>::SignMachine as SignMachine<C::Transaction>
+<N::TransactionMachine as PreprocessMachine>::SignMachine as SignMachine<N::Transaction>
>::SignatureMachine,
>,

-pub events: VecDeque<SignerEvent<C>>,
+pub events: VecDeque<SignerEvent<N>>,
}

-impl<C: Coin, D: Db> fmt::Debug for Signer<C, D> {
+impl<N: Network, D: Db> fmt::Debug for Signer<N, D> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("Signer")
-.field("coin", &self.coin)
+.field("network", &self.network)
.field("signable", &self.signable)
.field("attempt", &self.attempt)
.finish_non_exhaustive()
}
}

-impl<C: Coin, D: Db> Signer<C, D> {
+impl<N: Network, D: Db> Signer<N, D> {
-pub fn new(coin: C, keys: ThresholdKeys<C::Curve>) -> Signer<C, D> {
+pub fn new(network: N, keys: ThresholdKeys<N::Curve>) -> Signer<N, D> {
Signer {
db: PhantomData,

-coin,
+network,

keys,

@@ -138,7 +138,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
}
}

-pub fn keys(&self) -> ThresholdKeys<C::Curve> {
+pub fn keys(&self) -> ThresholdKeys<N::Curve> {
self.keys.clone()
}

@@ -173,7 +173,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
}

fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
-if SignerDb::<C, D>::completed(txn, id).is_some() {
+if SignerDb::<N, D>::completed(txn, id).is_some() {
debug!(
"SignTransaction/Reattempt order for {}, which we've already completed signing",
hex::encode(id)
@@ -185,7 +185,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
}
}

-fn complete(&mut self, id: [u8; 32], tx_id: <C::Transaction as Transaction<C>>::Id) {
+fn complete(&mut self, id: [u8; 32], tx_id: <N::Transaction as Transaction<N>>::Id) {
// Assert we're actively signing for this TX
assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for");
assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have");
@@ -205,14 +205,14 @@ impl<C: Coin, D: Db> Signer<C, D> {
&mut self,
txn: &mut D::Transaction<'_>,
id: [u8; 32],
-tx_id: &<C::Transaction as Transaction<C>>::Id,
+tx_id: &<N::Transaction as Transaction<N>>::Id,
) {
-if let Some(eventuality) = SignerDb::<C, D>::eventuality(txn, id) {
+if let Some(eventuality) = SignerDb::<N, D>::eventuality(txn, id) {
// Transaction hasn't hit our mempool/was dropped for a different signature
// The latter can happen given certain latency conditions/a single malicious signer
// In the case of a single malicious signer, they can drag multiple honest
// validators down with them, so we unfortunately can't slash on this case
-let Ok(tx) = self.coin.get_transaction(tx_id).await else {
+let Ok(tx) = self.network.get_transaction(tx_id).await else {
warn!(
"a validator claimed {} completed {} yet we didn't have that TX in our mempool",
hex::encode(tx_id),
@@ -221,14 +221,14 @@ impl<C: Coin, D: Db> Signer<C, D> {
return;
};

-if self.coin.confirm_completion(&eventuality, &tx) {
+if self.network.confirm_completion(&eventuality, &tx) {
info!("eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id));

let first_completion = !self.already_completed(txn, id);

// Save this completion to the DB
-SignerDb::<C, D>::save_transaction(txn, &tx);
+SignerDb::<N, D>::save_transaction(txn, &tx);
-SignerDb::<C, D>::complete(txn, id, tx_id);
+SignerDb::<N, D>::complete(txn, id, tx_id);

if first_completion {
self.complete(id, tx.id());
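The completion path above boils down to: look up the eventuality saved for the plan, fetch the claimed transaction, check it actually satisfies the eventuality, then persist the completion and only surface the first one. A compact sketch of that bookkeeping; Expected, Completions, and claim are illustrative stand-ins rather than the processor's Eventuality/SignerDb types:

use std::collections::{HashMap, HashSet};

// Illustrative stand-ins, not the processor's Eventuality or SignerDb types.
struct Expected {
  to: String,
  amount: u64,
}

struct Completions {
  eventualities: HashMap<[u8; 32], Expected>,
  completed: HashSet<[u8; 32]>,
}

impl Completions {
  // Returns true only for the first claim which satisfies the eventuality.
  fn claim(&mut self, plan: [u8; 32], to: &str, amount: u64) -> bool {
    let Some(expected) = self.eventualities.get(&plan) else { return false };
    // The claimed transaction must pay exactly what the plan committed to.
    if (expected.to != to) || (expected.amount != amount) {
      return false;
    }
    // Every valid completion is recorded, but only the first returns true.
    self.completed.insert(plan)
  }
}

fn main() {
  let plan = [1; 32];
  let mut completions = Completions {
    eventualities: HashMap::from([(plan, Expected { to: "addr".into(), amount: 5 })]),
    completed: HashSet::new(),
  };
  assert!(completions.claim(plan, "addr", 5));
  // A later claim for the same plan is no longer a first completion.
  assert!(!completions.claim(plan, "addr", 5));
  // A claim which doesn't match the eventuality is rejected outright.
  assert!(!completions.claim(plan, "addr", 6));
}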
@@ -298,7 +298,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
// branch again for something we've already attempted
//
// Only run if this hasn't already been attempted
-if SignerDb::<C, D>::has_attempt(txn, &id) {
+if SignerDb::<N, D>::has_attempt(txn, &id) {
warn!(
"already attempted {} #{}. this is an error if we didn't reboot",
hex::encode(id.id),
@@ -307,10 +307,10 @@ impl<C: Coin, D: Db> Signer<C, D> {
return;
}

-SignerDb::<C, D>::attempt(txn, &id);
+SignerDb::<N, D>::attempt(txn, &id);

// Attempt to create the TX
-let machine = match self.coin.attempt_send(tx).await {
+let machine = match self.network.attempt_send(tx).await {
Err(e) => {
error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
return;
@@ -336,14 +336,14 @@ impl<C: Coin, D: Db> Signer<C, D> {
&mut self,
txn: &mut D::Transaction<'_>,
id: [u8; 32],
-tx: C::SignableTransaction,
+tx: N::SignableTransaction,
-eventuality: C::Eventuality,
+eventuality: N::Eventuality,
) {
if self.already_completed(txn, id) {
return;
}

-SignerDb::<C, D>::save_eventuality(txn, id, eventuality);
+SignerDb::<N, D>::save_eventuality(txn, id, eventuality);

self.signable.insert(id, tx);
self.attempt(txn, id, 0).await;
@@ -445,12 +445,12 @@ impl<C: Coin, D: Db> Signer<C, D> {
};

// Save the transaction in case it's needed for recovery
-SignerDb::<C, D>::save_transaction(txn, &tx);
+SignerDb::<N, D>::save_transaction(txn, &tx);
let tx_id = tx.id();
-SignerDb::<C, D>::complete(txn, id.id, &tx_id);
+SignerDb::<N, D>::complete(txn, id.id, &tx_id);

// Publish it
-if let Err(e) = self.coin.publish_transaction(&tx).await {
+if let Err(e) = self.network.publish_transaction(&tx).await {
error!("couldn't publish {:?}: {:?}", tx, e);
} else {
info!("published {} for plan {}", hex::encode(&tx_id), hex::encode(id.id));
@@ -465,7 +465,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
}

CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => {
-let mut tx = <C::Transaction as Transaction<C>>::Id::default();
+let mut tx = <N::Transaction as Transaction<N>>::Id::default();
if tx.as_ref().len() != tx_vec.len() {
let true_len = tx_vec.len();
tx_vec.truncate(2 * tx.as_ref().len());
@@ -11,18 +11,18 @@ use serai_db::{DbTxn, MemDb};

use crate::{
Plan, Db,
-coins::{OutputType, Output, Block, Coin},
+networks::{OutputType, Output, Block, Network},
scanner::{ScannerEvent, Scanner, ScannerHandle},
tests::sign,
};

-async fn spend<C: Coin, D: Db>(
+async fn spend<N: Network, D: Db>(
-coin: &C,
+network: &N,
-keys: &HashMap<Participant, ThresholdKeys<C::Curve>>,
+keys: &HashMap<Participant, ThresholdKeys<N::Curve>>,
-scanner: &mut ScannerHandle<C, D>,
+scanner: &mut ScannerHandle<N, D>,
batch: u32,
-outputs: Vec<C::Output>,
+outputs: Vec<N::Output>,
-) -> Vec<C::Output> {
+) -> Vec<N::Output> {
let key = keys[&Participant::new(1).unwrap()].group_key();

let mut keys_txs = HashMap::new();
@@ -31,13 +31,13 @@ async fn spend<C: Coin, D: Db>(
*i,
(
keys.clone(),
-coin
+network
.prepare_send(
keys.clone(),
-coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS,
+network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS,
// Send to a change output
Plan { key, inputs: outputs.clone(), payments: vec![], change: Some(key) },
-coin.get_fee().await,
+network.get_fee().await,
)
.await
.unwrap()
@@ -46,10 +46,10 @@ async fn spend<C: Coin, D: Db>(
),
);
}
-sign(coin.clone(), keys_txs).await;
+sign(network.clone(), keys_txs).await;

-for _ in 0 .. C::CONFIRMATIONS {
+for _ in 0 .. N::CONFIRMATIONS {
-coin.mine_block().await;
+network.mine_block().await;
}
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key: this_key, block: _, batch: this_batch, outputs } => {
@@ -66,27 +66,27 @@ async fn spend<C: Coin, D: Db>(
}
}

-pub async fn test_addresses<C: Coin>(coin: C) {
+pub async fn test_addresses<N: Network>(network: N) {
-let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng);
+let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng);
for (_, keys) in keys.iter_mut() {
-C::tweak_keys(keys);
+N::tweak_keys(keys);
}
let key = keys[&Participant::new(1).unwrap()].group_key();

// Mine blocks so there's a confirmed block
-for _ in 0 .. C::CONFIRMATIONS {
+for _ in 0 .. N::CONFIRMATIONS {
-coin.mine_block().await;
+network.mine_block().await;
}

let mut db = MemDb::new();
-let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
+let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone());
assert!(active_keys.is_empty());
let mut txn = db.txn();
-scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
+scanner.rotate_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await;
txn.commit();

// Receive funds to the branch address and make sure it's properly identified
-let block_id = coin.test_send(C::branch_address(key)).await.id();
+let block_id = network.test_send(N::branch_address(key)).await.id();

// Verify the Scanner picked them up
let outputs =
@@ -105,7 +105,7 @@ pub async fn test_addresses<C: Coin>(coin: C) {
};

// Spend the branch output, creating a change output and ensuring we actually get change
-let outputs = spend(&coin, &keys, &mut scanner, 1, outputs).await;
+let outputs = spend(&network, &keys, &mut scanner, 1, outputs).await;
// Also test spending the change output
-spend(&coin, &keys, &mut scanner, 2, outputs).await;
+spend(&network, &keys, &mut scanner, 2, outputs).await;
}
@@ -17,14 +17,14 @@ use serai_client::{

use messages::key_gen::*;
use crate::{
-coins::Coin,
+networks::Network,
key_gen::{KeyConfirmed, KeyGen},
};

const ID: KeyGenId =
KeyGenId { set: ValidatorSet { session: Session(1), network: NetworkId::Monero }, attempt: 3 };

-pub async fn test_key_gen<C: Coin>() {
+pub async fn test_key_gen<N: Network>() {
let mut entropies = HashMap::new();
let mut dbs = HashMap::new();
let mut key_gens = HashMap::new();
@@ -34,7 +34,7 @@ pub async fn test_key_gen<C: Coin>() {
entropies.insert(i, entropy);
let db = MemDb::new();
dbs.insert(i, db.clone());
-key_gens.insert(i, KeyGen::<C, MemDb>::new(db, entropies[&i].clone()));
+key_gens.insert(i, KeyGen::<N, MemDb>::new(db, entropies[&i].clone()));
}

let mut all_commitments = HashMap::new();
@@ -65,7 +65,7 @@ pub async fn test_key_gen<C: Coin>() {
// 3 ... are rebuilt once, one at each of the following steps
let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| {
key_gens.remove(&i);
-key_gens.insert(i, KeyGen::<C, _>::new(dbs[&i].clone(), entropies[&i].clone()));
+key_gens.insert(i, KeyGen::<N, _>::new(dbs[&i].clone(), entropies[&i].clone()));
};
rebuild(&mut key_gens, &dbs, 1);
rebuild(&mut key_gens, &dbs, 2);
@@ -102,7 +102,7 @@ pub async fn test_key_gen<C: Coin>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
-if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } = key_gen
+if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen
.handle(
&mut txn,
CoordinatorMessage::Shares {
@@ -117,9 +117,9 @@ pub async fn test_key_gen<C: Coin>() {
{
assert_eq!(id, ID);
if res.is_none() {
-res = Some((substrate_key, coin_key.clone()));
+res = Some((substrate_key, network_key.clone()));
}
-assert_eq!(res.as_ref().unwrap(), &(substrate_key, coin_key));
+assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key));
} else {
panic!("didn't get key back");
}
@@ -134,7 +134,7 @@ pub async fn test_key_gen<C: Coin>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
-let KeyConfirmed { substrate_keys, coin_keys } = key_gen
+let KeyConfirmed { substrate_keys, network_keys } = key_gen
.confirm(&mut txn, ID.set, (sr25519::Public(res.0), res.1.clone().try_into().unwrap()))
.await;
txn.commit();
@@ -142,9 +142,12 @@ pub async fn test_key_gen<C: Coin>() {
let params =
ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();
assert_eq!(substrate_keys.params(), params);
-assert_eq!(coin_keys.params(), params);
+assert_eq!(network_keys.params(), params);
assert_eq!(
-(substrate_keys.group_key().to_bytes(), coin_keys.group_key().to_bytes().as_ref().to_vec()),
+(
+substrate_keys.group_key().to_bytes(),
+network_keys.group_key().to_bytes().as_ref().to_vec()
+),
res
);
}
@@ -1,6 +1,6 @@
#[cfg(feature = "bitcoin")]
mod bitcoin {
-use crate::coins::Bitcoin;
+use crate::networks::Bitcoin;

async fn bitcoin() -> Bitcoin {
let bitcoin = Bitcoin::new("http://serai:seraidex@127.0.0.1:18443".to_string()).await;
@@ -8,7 +8,7 @@ mod bitcoin {
bitcoin
}

-test_coin!(
+test_network!(
Bitcoin,
bitcoin,
bitcoin_key_gen,
@@ -21,7 +21,7 @@ mod bitcoin {

#[cfg(feature = "monero")]
mod monero {
-use crate::coins::{Coin, Monero};
+use crate::networks::{Network, Monero};

async fn monero() -> Monero {
let monero = Monero::new("http://127.0.0.1:18081".to_string());
@@ -31,7 +31,7 @@ mod monero {
monero
}

-test_coin!(
+test_network!(
Monero,
monero,
monero_key_gen,
@@ -50,10 +50,10 @@ macro_rules! async_sequential {
}

#[macro_export]
-macro_rules! test_coin {
+macro_rules! test_network {
(
-$C: ident,
+$N: ident,
-$coin: ident,
+$network: ident,
$key_gen: ident,
$scanner: ident,
$signer: ident,
@@ -65,32 +65,32 @@ macro_rules! test_coin {
// This doesn't interact with a node and accordingly doesn't need to be run sequentially
#[tokio::test]
async fn $key_gen() {
-test_key_gen::<$C>().await;
+test_key_gen::<$N>().await;
}

sequential!();

async_sequential! {
async fn $scanner() {
-test_scanner($coin().await).await;
+test_scanner($network().await).await;
}
}

async_sequential! {
async fn $signer() {
-test_signer($coin().await).await;
+test_signer($network().await).await;
}
}

async_sequential! {
async fn $wallet() {
-test_wallet($coin().await).await;
+test_wallet($network().await).await;
}
}

async_sequential! {
async fn $addresses() {
-test_addresses($coin().await).await;
+test_addresses($network().await).await;
}
}
};
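Each network module above instantiates this macro once, and every identifier it receives becomes a generated test. A toy sketch of the same macro shape, using a stand-in network_tests! macro and MockNetwork type rather than the real test_network!:

// Stand-in macro showing the shape of the call sites above: the caller hands
// in a network type, its constructor, and the test names to generate.
macro_rules! network_tests {
  ($N: ident, $network: ident, $key_gen: ident) => {
    #[allow(dead_code)]
    async fn $key_gen() {
      // The real macro invokes test_key_gen and friends here.
      let _ = $network().await;
    }
  };
}

struct MockNetwork;

async fn mock_network() -> MockNetwork {
  MockNetwork
}

// Expands to `async fn mock_key_gen()`, analogous to `bitcoin_key_gen` above.
network_tests!(MockNetwork, mock_network, mock_key_gen);

fn main() {
  // The generated functions are #[tokio::test]s in the real crate; here we
  // only show that the expansion produces a callable async fn.
  let _ = mock_key_gen();
}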
|
|||||||
@@ -12,27 +12,27 @@ use serai_client::primitives::BlockHash;
|
|||||||
use serai_db::{DbTxn, Db, MemDb};
|
use serai_db::{DbTxn, Db, MemDb};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
coins::{OutputType, Output, Block, Coin},
|
networks::{OutputType, Output, Block, Network},
|
||||||
scanner::{ScannerEvent, Scanner, ScannerHandle},
|
scanner::{ScannerEvent, Scanner, ScannerHandle},
|
||||||
};
|
};
|
||||||
|
|
||||||
pub async fn test_scanner<C: Coin>(coin: C) {
|
pub async fn test_scanner<N: Network>(network: N) {
|
||||||
let mut keys =
|
let mut keys =
|
||||||
frost::tests::key_gen::<_, C::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();
|
frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();
|
||||||
C::tweak_keys(&mut keys);
|
N::tweak_keys(&mut keys);
|
||||||
let group_key = keys.group_key();
|
let group_key = keys.group_key();
|
||||||
|
|
||||||
// Mine blocks so there's a confirmed block
|
// Mine blocks so there's a confirmed block
|
||||||
for _ in 0 .. C::CONFIRMATIONS {
|
for _ in 0 .. N::CONFIRMATIONS {
|
||||||
coin.mine_block().await;
|
network.mine_block().await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let first = Arc::new(Mutex::new(true));
|
let first = Arc::new(Mutex::new(true));
|
||||||
let activation_number = coin.get_latest_block_number().await.unwrap();
|
let activation_number = network.get_latest_block_number().await.unwrap();
|
||||||
let db = MemDb::new();
|
let db = MemDb::new();
|
||||||
let new_scanner = || async {
|
let new_scanner = || async {
|
||||||
let mut db = db.clone();
|
let mut db = db.clone();
|
||||||
let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
|
let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone());
|
||||||
let mut first = first.lock().unwrap();
|
let mut first = first.lock().unwrap();
|
||||||
if *first {
|
if *first {
|
||||||
assert!(active_keys.is_empty());
|
assert!(active_keys.is_empty());
|
||||||
@@ -48,11 +48,11 @@ pub async fn test_scanner<C: Coin>(coin: C) {
|
|||||||
let scanner = new_scanner().await;
|
let scanner = new_scanner().await;
|
||||||
|
|
||||||
// Receive funds
|
// Receive funds
|
||||||
let block = coin.test_send(C::address(keys.group_key())).await;
|
let block = network.test_send(N::address(keys.group_key())).await;
|
||||||
let block_id = block.id();
|
let block_id = block.id();
|
||||||
|
|
||||||
// Verify the Scanner picked them up
|
// Verify the Scanner picked them up
|
||||||
let verify_event = |mut scanner: ScannerHandle<C, MemDb>| async {
|
let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async {
|
||||||
let outputs =
|
let outputs =
|
||||||
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
|
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
|
||||||
ScannerEvent::Block { key, block, batch, outputs } => {
|
ScannerEvent::Block { key, block, batch, outputs } => {
|
||||||
@@ -80,7 +80,7 @@ pub async fn test_scanner<C: Coin>(coin: C) {
|
|||||||
let mut blocks = vec![];
|
let mut blocks = vec![];
|
||||||
let mut curr_block = activation_number + 1;
|
let mut curr_block = activation_number + 1;
|
||||||
loop {
|
loop {
|
||||||
let block = coin.get_block(curr_block).await.unwrap().id();
|
let block = network.get_block(curr_block).await.unwrap().id();
|
||||||
blocks.push(BlockHash(block.as_ref().try_into().unwrap()));
|
blocks.push(BlockHash(block.as_ref().try_into().unwrap()));
|
||||||
if block == block_id {
|
if block == block_id {
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -13,18 +13,18 @@ use serai_db::{DbTxn, Db, MemDb};
|
|||||||
use messages::sign::*;
|
use messages::sign::*;
|
||||||
use crate::{
|
use crate::{
|
||||||
Payment, Plan,
|
Payment, Plan,
|
||||||
coins::{Output, Transaction, Coin},
|
networks::{Output, Transaction, Network},
|
||||||
signer::{SignerEvent, Signer},
|
signer::{SignerEvent, Signer},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
pub async fn sign<C: Coin>(
|
pub async fn sign<N: Network>(
|
||||||
coin: C,
|
network: N,
|
||||||
mut keys_txs: HashMap<
|
mut keys_txs: HashMap<
|
||||||
Participant,
|
Participant,
|
||||||
(ThresholdKeys<C::Curve>, (C::SignableTransaction, C::Eventuality)),
|
(ThresholdKeys<N::Curve>, (N::SignableTransaction, N::Eventuality)),
|
||||||
>,
|
>,
|
||||||
) -> <C::Transaction as Transaction<C>>::Id {
|
) -> <N::Transaction as Transaction<N>>::Id {
|
||||||
let actual_id = SignId {
|
let actual_id = SignId {
|
||||||
key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(),
|
key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(),
|
||||||
id: [0xaa; 32],
|
id: [0xaa; 32],
|
||||||
@@ -45,7 +45,7 @@ pub async fn sign<C: Coin>(
|
|||||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||||
let keys = keys.remove(&i).unwrap();
|
let keys = keys.remove(&i).unwrap();
|
||||||
t = keys.params().t();
|
t = keys.params().t();
|
||||||
signers.insert(i, Signer::<_, MemDb>::new(coin.clone(), keys));
|
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), keys));
|
||||||
dbs.insert(i, MemDb::new());
|
dbs.insert(i, MemDb::new());
|
||||||
}
|
}
|
||||||
drop(keys);
|
drop(keys);
|
||||||
@@ -146,29 +146,29 @@ pub async fn sign<C: Coin>(
|
|||||||
tx_id.unwrap()
|
tx_id.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn test_signer<C: Coin>(coin: C) {
|
pub async fn test_signer<N: Network>(network: N) {
|
||||||
let mut keys = key_gen(&mut OsRng);
|
let mut keys = key_gen(&mut OsRng);
|
||||||
for (_, keys) in keys.iter_mut() {
|
for (_, keys) in keys.iter_mut() {
|
||||||
C::tweak_keys(keys);
|
N::tweak_keys(keys);
|
||||||
}
|
}
|
||||||
let key = keys[&Participant::new(1).unwrap()].group_key();
|
let key = keys[&Participant::new(1).unwrap()].group_key();
|
||||||
|
|
||||||
let outputs = coin.get_outputs(&coin.test_send(C::address(key)).await, key).await.unwrap();
|
let outputs = network.get_outputs(&network.test_send(N::address(key)).await, key).await.unwrap();
|
||||||
let sync_block = coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS;
|
let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS;
|
||||||
let fee = coin.get_fee().await;
|
let fee = network.get_fee().await;
|
||||||
|
|
||||||
let amount = 2 * C::DUST;
|
let amount = 2 * N::DUST;
|
||||||
let mut keys_txs = HashMap::new();
|
let mut keys_txs = HashMap::new();
|
||||||
let mut eventualities = vec![];
|
let mut eventualities = vec![];
|
||||||
for (i, keys) in keys.drain() {
|
for (i, keys) in keys.drain() {
|
||||||
let (signable, eventuality) = coin
|
let (signable, eventuality) = network
|
||||||
.prepare_send(
|
.prepare_send(
|
||||||
keys.clone(),
|
keys.clone(),
|
||||||
sync_block,
|
sync_block,
|
||||||
Plan {
|
Plan {
|
||||||
key,
|
key,
|
||||||
inputs: outputs.clone(),
|
inputs: outputs.clone(),
|
||||||
payments: vec![Payment { address: C::address(key), data: None, amount }],
|
payments: vec![Payment { address: N::address(key), data: None, amount }],
|
||||||
change: Some(key),
|
change: Some(key),
|
||||||
},
|
},
|
||||||
fee,
|
fee,
|
||||||
@@ -184,23 +184,26 @@ pub async fn test_signer<C: Coin>(coin: C) {

   // The signer may not publish the TX if it has a connection error
   // It doesn't fail in this case
-  let txid = sign(coin.clone(), keys_txs).await;
-  let tx = coin.get_transaction(&txid).await.unwrap();
+  let txid = sign(network.clone(), keys_txs).await;
+  let tx = network.get_transaction(&txid).await.unwrap();
   assert_eq!(tx.id(), txid);
   // Mine a block, and scan it, to ensure that the TX actually made it on chain
-  coin.mine_block().await;
-  let outputs = coin
-    .get_outputs(&coin.get_block(coin.get_latest_block_number().await.unwrap()).await.unwrap(), key)
+  network.mine_block().await;
+  let outputs = network
+    .get_outputs(
+      &network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(),
+      key,
+    )
     .await
     .unwrap();
   assert_eq!(outputs.len(), 2);
   // Adjust the amount for the fees
-  let amount = amount - tx.fee(&coin).await;
+  let amount = amount - tx.fee(&network).await;
   // Check either output since Monero will randomize its output order
   assert!((outputs[0].amount() == amount) || (outputs[1].amount() == amount));

   // Check the eventualities pass
   for eventuality in eventualities {
-    assert!(coin.confirm_completion(&eventuality, &tx));
+    assert!(network.confirm_completion(&eventuality, &tx));
   }
 }
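Aside: the rename above is purely mechanical, swapping the `C: Coin` bound and `coin` binding for `N: Network` and `network` without touching the test's control flow. As a rough illustration of the pattern (this toy `Network` trait and `DummyNetwork` are hypothetical stand-ins invented for the example, not the processor's real trait), the shape of the change is:

// Toy sketch of the rename pattern applied above; `CONFIRMATIONS` and `DUST`
// mirror names from the diff, but this trait is a stand-in for illustration only.
trait Network: Clone {
  const CONFIRMATIONS: usize;
  const DUST: u64;
  fn name(&self) -> &'static str;
}

#[derive(Clone)]
struct DummyNetwork;
impl Network for DummyNetwork {
  const CONFIRMATIONS: usize = 6;
  const DUST: u64 = 10_000;
  fn name(&self) -> &'static str {
    "dummy"
  }
}

// Previously `fn test_signer<C: Coin>(coin: C)`; only the bound and the binding change.
fn test_signer<N: Network>(network: N) {
  let amount = 2 * N::DUST;
  println!("{}: sending {} with {} confirmations", network.name(), amount, N::CONFIRMATIONS);
}

fn main() {
  test_signer(DummyNetwork);
}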
@@ -10,29 +10,29 @@ use serai_db::{DbTxn, Db, MemDb};

 use crate::{
   Payment, Plan,
-  coins::{Output, Transaction, Block, Coin},
+  networks::{Output, Transaction, Block, Network},
   scanner::{ScannerEvent, Scanner},
   scheduler::Scheduler,
   tests::sign,
 };

 // Tests the Scanner, Scheduler, and Signer together
-pub async fn test_wallet<C: Coin>(coin: C) {
+pub async fn test_wallet<N: Network>(network: N) {
   let mut keys = key_gen(&mut OsRng);
   for (_, keys) in keys.iter_mut() {
-    C::tweak_keys(keys);
+    N::tweak_keys(keys);
   }
   let key = keys[&Participant::new(1).unwrap()].group_key();

   let mut db = MemDb::new();
-  let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
+  let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone());
   assert!(active_keys.is_empty());
   let (block_id, outputs) = {
     let mut txn = db.txn();
-    scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
+    scanner.rotate_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await;
     txn.commit();

-    let block = coin.test_send(C::address(key)).await;
+    let block = network.test_send(N::address(key)).await;
     let block_id = block.id();

     match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
@@ -51,11 +51,11 @@ pub async fn test_wallet<C: Coin>(coin: C) {

   let mut txn = db.txn();
   let mut scheduler = Scheduler::new::<MemDb>(&mut txn, key);
-  let amount = 2 * C::DUST;
+  let amount = 2 * N::DUST;
   let plans = scheduler.schedule::<MemDb>(
     &mut txn,
     outputs.clone(),
-    vec![Payment { address: C::address(key), data: None, amount }],
+    vec![Payment { address: N::address(key), data: None, amount }],
   );
   txn.commit();
   assert_eq!(
@@ -63,7 +63,7 @@ pub async fn test_wallet<C: Coin>(coin: C) {
     vec![Plan {
       key,
       inputs: outputs.clone(),
-      payments: vec![Payment { address: C::address(key), data: None, amount }],
+      payments: vec![Payment { address: N::address(key), data: None, amount }],
       change: Some(key),
     }]
   );
@@ -71,16 +71,16 @@ pub async fn test_wallet<C: Coin>(coin: C) {
   {
     let mut buf = vec![];
     plans[0].write(&mut buf).unwrap();
-    assert_eq!(plans[0], Plan::<C>::read::<&[u8]>(&mut buf.as_ref()).unwrap());
+    assert_eq!(plans[0], Plan::<N>::read::<&[u8]>(&mut buf.as_ref()).unwrap());
   }

   // Execute the plan
-  let fee = coin.get_fee().await;
+  let fee = network.get_fee().await;
   let mut keys_txs = HashMap::new();
   let mut eventualities = vec![];
   for (i, keys) in keys.drain() {
-    let (signable, eventuality) = coin
-      .prepare_send(keys.clone(), coin.get_block_number(&block_id).await, plans[0].clone(), fee)
+    let (signable, eventuality) = network
+      .prepare_send(keys.clone(), network.get_block_number(&block_id).await, plans[0].clone(), fee)
       .await
       .unwrap()
       .0
@@ -90,23 +90,23 @@ pub async fn test_wallet<C: Coin>(coin: C) {
     keys_txs.insert(i, (keys, (signable, eventuality)));
   }

-  let txid = sign(coin.clone(), keys_txs).await;
-  let tx = coin.get_transaction(&txid).await.unwrap();
-  coin.mine_block().await;
-  let block_number = coin.get_latest_block_number().await.unwrap();
-  let block = coin.get_block(block_number).await.unwrap();
+  let txid = sign(network.clone(), keys_txs).await;
+  let tx = network.get_transaction(&txid).await.unwrap();
+  network.mine_block().await;
+  let block_number = network.get_latest_block_number().await.unwrap();
+  let block = network.get_block(block_number).await.unwrap();
   let first_outputs = outputs;
-  let outputs = coin.get_outputs(&block, key).await.unwrap();
+  let outputs = network.get_outputs(&block, key).await.unwrap();
   assert_eq!(outputs.len(), 2);
-  let amount = amount - tx.fee(&coin).await;
+  let amount = amount - tx.fee(&network).await;
   assert!((outputs[0].amount() == amount) || (outputs[1].amount() == amount));

   for eventuality in eventualities {
-    assert!(coin.confirm_completion(&eventuality, &tx));
+    assert!(network.confirm_completion(&eventuality, &tx));
   }

-  for _ in 1 .. C::CONFIRMATIONS {
-    coin.mine_block().await;
+  for _ in 1 .. N::CONFIRMATIONS {
+    network.mine_block().await;
   }

   match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
@@ -43,9 +43,9 @@ tokio = "1"
 [features]
 serai = ["thiserror", "scale-info", "subxt"]

-coins = []
-bitcoin = ["coins", "dep:bitcoin"]
-monero = ["coins", "ciphersuite/ed25519", "monero-serai"]
+networks = []
+bitcoin = ["networks", "dep:bitcoin"]
+monero = ["networks", "ciphersuite/ed25519", "monero-serai"]

 # Assumes the default usage is to use Serai as a DEX, which doesn't actually
 # require connecting to a Serai node
@@ -1,5 +1,5 @@
-#[cfg(feature = "coins")]
-pub mod coins;
+#[cfg(feature = "networks")]
+pub mod networks;

 #[cfg(feature = "serai")]
 mod serai;
@@ -1,2 +1,2 @@
-#[cfg(feature = "coins")]
-mod coins;
+#[cfg(feature = "networks")]
+mod networks;
@@ -20,8 +20,8 @@ pub use amount::*;
 mod block;
 pub use block::*;

-mod coins;
-pub use coins::*;
+mod networks;
+pub use networks::*;

 mod balance;
 pub use balance::*;
@@ -88,7 +88,7 @@ pub fn network_rpc(network: NetworkId, ops: &DockerOperations, handle: &str) ->
 }

 pub fn confirmations(network: NetworkId) -> usize {
-  use processor::coins::*;
+  use processor::networks::*;
   match network {
     NetworkId::Bitcoin => Bitcoin::CONFIRMATIONS,
     NetworkId::Ethereum => todo!(),
@@ -313,7 +313,7 @@ impl Wallet {
       },
       rpc::HttpRpc,
     };
-    use processor::{additional_key, coins::Monero};
+    use processor::{additional_key, networks::Monero};

     let rpc_url = network_rpc(NetworkId::Monero, ops, handle);
     let rpc = HttpRpc::new(rpc_url).expect("couldn't connect to the Monero RPC");
@@ -384,23 +384,27 @@ impl Wallet {
   }

   pub fn address(&self) -> ExternalAddress {
-    use serai_client::coins;
+    use serai_client::networks;

     match self {
       Wallet::Bitcoin { public_key, .. } => {
         use bitcoin_serai::bitcoin::{Network, Address};
         ExternalAddress::new(
-          coins::bitcoin::Address(Address::p2pkh(public_key, Network::Regtest)).try_into().unwrap(),
+          networks::bitcoin::Address(Address::p2pkh(public_key, Network::Regtest))
+            .try_into()
+            .unwrap(),
         )
         .unwrap()
       }
       Wallet::Monero { view_pair, .. } => {
         use monero_serai::wallet::address::{Network, AddressSpec};
         ExternalAddress::new(
-          coins::monero::Address::new(view_pair.address(Network::Mainnet, AddressSpec::Standard))
-            .unwrap()
-            .try_into()
-            .unwrap(),
+          networks::monero::Address::new(
+            view_pair.address(Network::Mainnet, AddressSpec::Standard),
+          )
+          .unwrap()
+          .try_into()
+          .unwrap(),
         )
         .unwrap()
       }
@@ -273,7 +273,7 @@ fn batch_test() {
         messages::substrate::CoordinatorMessage::SubstrateBlock {
           context: SubstrateContext {
             serai_time,
-            coin_latest_finalized_block: batch.batch.block,
+            network_latest_finalized_block: batch.batch.block,
           },
           network,
           block: substrate_block_num + u64::from(i),
@@ -80,7 +80,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId

   // Send the shares
   let mut substrate_key = None;
-  let mut coin_key = None;
+  let mut network_key = None;
   interact_with_all(
     coordinators,
     |participant| messages::key_gen::CoordinatorMessage::Shares {
@@ -96,15 +96,15 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
       messages::key_gen::ProcessorMessage::GeneratedKeyPair {
         id: this_id,
         substrate_key: this_substrate_key,
-        coin_key: this_coin_key,
+        network_key: this_network_key,
       } => {
         assert_eq!(this_id, id);
         if substrate_key.is_none() {
           substrate_key = Some(this_substrate_key);
-          coin_key = Some(this_coin_key.clone());
+          network_key = Some(this_network_key.clone());
         }
         assert_eq!(substrate_key.unwrap(), this_substrate_key);
-        assert_eq!(coin_key.as_ref().unwrap(), &this_coin_key);
+        assert_eq!(network_key.as_ref().unwrap(), &this_network_key);
       }
       _ => panic!("processor didn't return GeneratedKeyPair in response to GenerateKey"),
     },
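The hunk above keeps the same cross-check logic and only renames `coin_key` to `network_key`: every coordinator must report an identical `(substrate_key, network_key)` pair. A minimal, self-contained sketch of that check, using plain byte arrays and vectors in place of the real message and key types (which are not shown in this diff), might look like:

// Minimal sketch of the cross-check performed above: every participant must
// report the same (substrate_key, network_key) pair. Plain byte arrays and
// vectors stand in for the real key types.
fn check_key_pairs(reported: &[([u8; 32], Vec<u8>)]) -> ([u8; 32], Vec<u8>) {
  let mut substrate_key = None;
  let mut network_key = None;
  for (this_substrate_key, this_network_key) in reported {
    if substrate_key.is_none() {
      substrate_key = Some(*this_substrate_key);
      network_key = Some(this_network_key.clone());
    }
    assert_eq!(substrate_key.unwrap(), *this_substrate_key);
    assert_eq!(network_key.as_ref().unwrap(), this_network_key);
  }
  (substrate_key.unwrap(), network_key.unwrap())
}

fn main() {
  let pair = ([1; 32], vec![2, 3, 4]);
  let reported = vec![pair.clone(), pair.clone(), pair];
  let (substrate_key, network_key) = check_key_pairs(&reported);
  assert_eq!(substrate_key, [1; 32]);
  assert_eq!(network_key, vec![2, 3, 4]);
}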
@@ -112,15 +112,15 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId
   .await;

   // Confirm the key pair
-  // TODO: Beter document coin_latest_finalized_block's genesis state, and error if a set claims
+  // TODO: Beter document network_latest_finalized_block's genesis state, and error if a set claims
   // [0; 32] was finalized
   let context = SubstrateContext {
     serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),
-    coin_latest_finalized_block: BlockHash([0; 32]),
+    network_latest_finalized_block: BlockHash([0; 32]),
   };

   let key_pair =
-    (PublicKey::from_raw(substrate_key.unwrap()), coin_key.clone().unwrap().try_into().unwrap());
+    (PublicKey::from_raw(substrate_key.unwrap()), network_key.clone().unwrap().try_into().unwrap());

   for coordinator in coordinators {
     coordinator
@@ -158,7 +158,7 @@ fn send_test() {
       let key_pair = key_gen(&mut coordinators, network).await;

       // Now we we have to mine blocks to activate the key
-      // (the first key is activated when the coin's block time exceeds the Serai time it was
+      // (the first key is activated when the network's block time exceeds the Serai time it was
       // confirmed at)

       for _ in 0 .. confirmations(network) {
@@ -209,7 +209,7 @@ fn send_test() {
         messages::substrate::CoordinatorMessage::SubstrateBlock {
           context: SubstrateContext {
             serai_time,
-            coin_latest_finalized_block: batch.batch.block,
+            network_latest_finalized_block: batch.batch.block,
          },
          network,
          block: substrate_block_num,
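Both `SubstrateBlock` hunks in these tests change only the field name inside `SubstrateContext`. As a hedged approximation built solely from what is visible in this diff (a Unix timestamp in seconds plus a 32-byte finalized-block hash; the real definitions are not part of this commit), the renamed context has roughly this shape:

use std::time::{SystemTime, UNIX_EPOCH};

// Local stand-ins approximating the types used above; field types are assumptions.
struct BlockHash([u8; 32]);

struct SubstrateContext {
  serai_time: u64,
  // Previously `coin_latest_finalized_block`.
  network_latest_finalized_block: BlockHash,
}

fn main() {
  let context = SubstrateContext {
    serai_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
    // [0; 32] is the genesis placeholder referenced in the key_gen test above.
    network_latest_finalized_block: BlockHash([0; 32]),
  };
  println!(
    "serai_time {}, network_latest_finalized_block {:?}",
    context.serai_time, context.network_latest_finalized_block.0
  );
}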