Add a TributaryReader which doesn't require a borrow to operate

Reduces lock contention.
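
For illustration, here is a minimal usage sketch of the pattern this enables (not code from this commit): the Tributary's RwLock is held only long enough to clone out a TributaryReader, after which every block and commit lookup is a direct database read. The reader methods used below (reader, genesis, block_after, block, commit) are the ones added in the diff; the import paths and trait bounds are assumptions made so the sketch is self-contained.

use std::sync::Arc;
use tokio::sync::RwLock;
// NOTE: these paths/bounds are assumed for the sketch; the coordinator imports from `::tributary`.
use serai_db::Db;
use tributary::{P2p, Transaction, Tributary, TributaryReader};

// Walk every block currently on-chain without holding the Tributary lock while reading.
async fn drain_blocks<D: Db, T: Transaction, P: P2p>(
  tributary: &Arc<RwLock<Tributary<D, T, P>>>,
) -> Vec<[u8; 32]> {
  // The only lock acquisition: clone a reader (a DB handle plus the genesis) out of the Tributary.
  let reader: TributaryReader<D, T> = tributary.read().await.reader();

  // From here on, every call is a plain database read; other tasks are free to take the lock.
  let mut seen = vec![];
  let mut latest = reader.genesis();
  while let Some(next) = reader.block_after(&latest) {
    let _block = reader.block(&next).unwrap();
    let _commit = reader.commit(&next).unwrap();
    seen.push(next);
    latest = next;
  }
  seen
}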

Additionally changes block_key to include the genesis. While not technically
needed, the lack of a genesis prefix introduced a side effect where any Tributary on
the database could return the block of any other Tributary. While that wasn't a
security issue, returning such a block suggested it was on-chain when it wasn't,
which may have been usable to create issues.
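
As a stand-alone sketch of the keying change (the real code goes through the database's D::key helper shown in the diff; plain concatenation is used here only for illustration): once the genesis is part of the key, the same block hash stored by two different Tributaries maps to two distinct database keys, so a lookup can never surface another chain's block.

// Free-standing illustration of the new keying scheme, not the crate's actual helper.
fn block_key(genesis: &[u8; 32], hash: &[u8; 32]) -> Vec<u8> {
  let parts: [&[u8]; 4] = [b"tributary_blockchain", b"block", genesis, hash];
  parts.concat()
}

fn main() {
  let hash = [0xaa; 32];
  let (genesis_a, genesis_b) = ([1u8; 32], [2u8; 32]);
  // Same block hash, different Tributaries: with the genesis in the key, the entries are distinct.
  assert_ne!(block_key(&genesis_a, &hash), block_key(&genesis_b, &hash));
}
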
Luke Parker
2023-04-24 06:50:40 -04:00
parent e0820759c0
commit e74b4ab94f
10 changed files with 178 additions and 154 deletions

View File

@@ -19,7 +19,7 @@ use serai_client::Serai;
 use tokio::{sync::RwLock, time::sleep};
-use ::tributary::{ReadWrite, Block, Tributary};
+use ::tributary::{ReadWrite, Block, Tributary, TributaryReader};
 mod tributary;
 use crate::tributary::{TributarySpec, Transaction};
@@ -65,7 +65,7 @@ async fn add_tributary<D: Db, P: P2p>(
 p2p: P,
 tributaries: &mut HashMap<[u8; 32], ActiveTributary<D, P>>,
 spec: TributarySpec,
-) {
+) -> TributaryReader<D, Transaction> {
 let tributary = Tributary::<_, Transaction, _>::new(
 // TODO: Use a db on a distinct volume
 db,
@@ -78,10 +78,14 @@
 .await
 .unwrap();
+let reader = tributary.reader();
 tributaries.insert(
 tributary.genesis(),
 ActiveTributary { spec, tributary: Arc::new(RwLock::new(tributary)) },
 );
+reader
 }
 pub async fn scan_substrate<D: Db, Pro: Processor>(
@@ -123,6 +127,11 @@ pub async fn scan_tributaries<D: Db, Pro: Processor, P: P2p>(
 mut processor: Pro,
 tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
 ) {
+let mut tributary_readers = vec![];
+for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
+tributary_readers.push((spec.clone(), tributary.read().await.reader()));
+}
 // Handle new Tributary blocks
 let mut tributary_db = tributary::TributaryDb::new(raw_db.clone());
 loop {
@@ -133,28 +142,27 @@ pub async fn scan_tributaries<D: Db, Pro: Processor, P: P2p>(
 {
 let mut new_tributaries = NEW_TRIBUTARIES.write().await;
 while let Some(spec) = new_tributaries.pop_front() {
-add_tributary(
+let reader = add_tributary(
 raw_db.clone(),
 key.clone(),
 p2p.clone(),
 // This is a short-lived write acquisition, which is why it should be fine
 &mut *tributaries.write().await,
-spec,
+spec.clone(),
 )
 .await;
+tributary_readers.push((spec, reader));
 }
 }
-// TODO: Make a TributaryReader which only requires a DB handle and safely doesn't require
-// locks
-// Use that here
-for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
-tributary::scanner::handle_new_blocks::<_, _, P>(
+for (spec, reader) in &tributary_readers {
+tributary::scanner::handle_new_blocks::<_, _>(
 &mut tributary_db,
 &key,
 &mut processor,
 spec,
-&*tributary.read().await,
+reader,
 )
 .await;
 }
@@ -177,8 +185,8 @@ pub async fn heartbeat_tributaries<D: Db, P: P2p>(
 for ActiveTributary { spec: _, tributary } in tributaries.read().await.values() {
 let tributary = tributary.read().await;
 let tip = tributary.tip().await;
-let block_time =
-SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0));
+let block_time = SystemTime::UNIX_EPOCH +
+Duration::from_secs(tributary.reader().time_of_block(&tip).unwrap_or(0));
 // Only trigger syncing if the block is more than a minute behind
 if SystemTime::now() > (block_time + Duration::from_secs(60)) {
@@ -202,8 +210,8 @@ pub async fn handle_p2p<D: Db, P: P2p>(
 let mut msg = p2p.receive().await;
 match msg.kind {
 P2pMessageKind::Tributary(genesis) => {
-let tributaries_read = tributaries.read().await;
-let Some(tributary) = tributaries_read.get(&genesis) else {
+let tributaries = tributaries.read().await;
+let Some(tributary) = tributaries.get(&genesis) else {
 log::debug!("received p2p message for unknown network");
 continue;
 };
@@ -215,8 +223,8 @@
 // TODO: Rate limit this
 P2pMessageKind::Heartbeat(genesis) => {
-let tributaries_read = tributaries.read().await;
-let Some(tributary) = tributaries_read.get(&genesis) else {
+let tributaries = tributaries.read().await;
+let Some(tributary) = tributaries.get(&genesis) else {
 log::debug!("received heartbeat message for unknown network");
 continue;
 };
@@ -264,12 +272,13 @@ pub async fn handle_p2p<D: Db, P: P2p>(
 log::debug!("received heartbeat and selected to respond");
+let reader = tributary_read.reader();
+drop(tributary_read);
 let mut latest = msg.msg.try_into().unwrap();
-// TODO: All of these calls don't *actually* need a read lock, just access to a DB handle
-// We can reduce lock contention accordingly
-while let Some(next) = tributary_read.block_after(&latest) {
-let mut res = tributary_read.block(&next).unwrap().serialize();
-res.extend(tributary_read.commit(&next).unwrap());
+while let Some(next) = reader.block_after(&latest) {
+let mut res = reader.block(&next).unwrap().serialize();
+res.extend(reader.commit(&next).unwrap());
 p2p.send(msg.sender, P2pMessageKind::Block(tributary.spec.genesis()), res).await;
 latest = next;
 }
@@ -320,8 +329,14 @@ pub async fn run<D: Db, Pro: Processor, P: P2p>(
 // Reload active tributaries from the database
 // TODO: Can MainDb take a borrow?
 for spec in MainDb(raw_db.clone()).active_tributaries().1 {
-add_tributary(raw_db.clone(), key.clone(), p2p.clone(), &mut *tributaries.write().await, spec)
-.await;
+let _ = add_tributary(
+raw_db.clone(),
+key.clone(),
+p2p.clone(),
+&mut *tributaries.write().await,
+spec,
+)
+.await;
 }
 // Handle new blocks for each Tributary

View File

@@ -117,6 +117,7 @@ pub async fn wait_for_tx_inclusion(
 mut last_checked: [u8; 32],
 hash: [u8; 32],
 ) -> [u8; 32] {
+let reader = tributary.reader();
 loop {
 let tip = tributary.tip().await;
 if tip == last_checked {
@@ -124,14 +125,14 @@ pub async fn wait_for_tx_inclusion(
 continue;
 }
-let mut queue = vec![tributary.block(&tip).unwrap()];
+let mut queue = vec![reader.block(&tip).unwrap()];
 let mut block = None;
 while {
 let parent = queue.last().unwrap().parent();
 if parent == tributary.genesis() {
 false
 } else {
-block = Some(tributary.block(&parent).unwrap());
+block = Some(reader.block(&parent).unwrap());
 block.as_ref().unwrap().hash() != last_checked
 }
 } {

View File

@@ -81,7 +81,7 @@ async fn dkg_test() {
 ) -> (TributaryDb<MemDb>, MemProcessor) {
 let mut scanner_db = TributaryDb(MemDb::new());
 let mut processor = MemProcessor::new();
-handle_new_blocks(&mut scanner_db, key, &mut processor, spec, tributary).await;
+handle_new_blocks(&mut scanner_db, key, &mut processor, spec, &tributary.reader()).await;
 (scanner_db, processor)
 }
@@ -96,7 +96,8 @@ async fn dkg_test() {
 sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
 // Verify the scanner emits a KeyGen::Commitments message
-handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
+handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1.reader())
+.await;
 {
 let mut msgs = processor.0.write().await;
 assert_eq!(msgs.pop_front().unwrap(), expected_commitments);
@@ -137,7 +138,8 @@ async fn dkg_test() {
 }
 // With just 4 sets of shares, nothing should happen yet
-handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
+handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1.reader())
+.await;
 assert!(processor.0.write().await.is_empty());
 // Publish the final set of shares
@@ -168,7 +170,8 @@ async fn dkg_test() {
 };
 // Any scanner which has handled the prior blocks should only emit the new event
-handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
+handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1.reader())
+.await;
 {
 let mut msgs = processor.0.write().await;
 assert_eq!(msgs.pop_front().unwrap(), shares_for(0));

View File

@@ -50,7 +50,7 @@ async fn handle_p2p_test() {
 sleep(Duration::from_secs(1)).await;
 // Make sure every tributary has it
 for tributary in &tributaries {
-assert!(tributary.read().await.block(&tip).is_some());
+assert!(tributary.read().await.reader().block(&tip).is_some());
 }
 // Then after another block of time, we should have yet another new block
@@ -59,6 +59,6 @@ async fn handle_p2p_test() {
 assert!(new_tip != tip);
 sleep(Duration::from_secs(1)).await;
 for tributary in tributaries {
-assert!(tributary.read().await.block(&new_tip).is_some());
+assert!(tributary.read().await.reader().block(&new_tip).is_some());
 }
 }

View File

@@ -62,7 +62,7 @@ async fn sync_test() {
 sleep(Duration::from_secs(1)).await;
 // Make sure every tributary has it
 for tributary in &tributaries {
-assert!(tributary.read().await.block(&tip).is_some());
+assert!(tributary.read().await.reader().block(&tip).is_some());
 }
 // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's
@@ -100,7 +100,9 @@ async fn sync_test() {
 let tip = tributary.tip().await;
 let syncer_tip = syncer_tributary.tip().await;
 // Allow a one block tolerance in case of race conditions
-assert!(HashSet::from([tip, tributary.block(&tip).unwrap().parent()]).contains(&syncer_tip));
+assert!(
+HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip)
+);
 syncer_tip
 };
@@ -115,6 +117,7 @@ async fn sync_test() {
 for _ in 0 .. 10 {
 let syncer_tributary = syncer_tributary.read().await;
 if syncer_tributary
+.reader()
 .parsed_commit(&syncer_tributary.tip().await)
 .unwrap()
 .validators

View File

@@ -46,7 +46,7 @@ async fn tx_test() {
 // All tributaries should have acknowledged this transaction in a block
 for (_, tributary) in tributaries {
-let block = tributary.block(&included_in).unwrap();
+let block = tributary.reader().block(&included_in).unwrap();
 assert_eq!(block.transactions, vec![tx.clone()]);
 }
 }

View File

@@ -5,7 +5,7 @@ use zeroize::Zeroizing;
 use ciphersuite::{Ciphersuite, Ristretto};
-use tributary::{Signed, Block, P2p, Tributary};
+use tributary::{Signed, Block, TributaryReader};
 use processor_messages::{
 key_gen::{self, KeyGenId},
@@ -22,14 +22,14 @@ use crate::{
 };
 // Handle a specific Tributary block
-async fn handle_block<D: Db, Pro: Processor, P: P2p>(
+async fn handle_block<D: Db, Pro: Processor>(
 db: &mut TributaryDb<D>,
 key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
 processor: &mut Pro,
 spec: &TributarySpec,
-tributary: &Tributary<D, Transaction, P>,
 block: Block<Transaction>,
 ) {
+let genesis = spec.genesis();
 let hash = block.hash();
 let mut event_id = 0;
@@ -58,78 +58,75 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
 }
 }
-let mut handle =
-|zone: Zone, label, needed, id, attempt, mut bytes: Vec<u8>, signed: Signed| {
-if zone == Zone::Dkg {
-// Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32]
-assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID");
-} else if !TributaryDb::<D>::recognized_id(&txn, zone.label(), tributary.genesis(), id) {
-// TODO: Full slash
-todo!();
-}
-// If they've already published a TX for this attempt, slash
-if let Some(data) =
-TributaryDb::<D>::data(label, &txn, tributary.genesis(), id, attempt, signed.signer)
-{
-if data != bytes {
-// TODO: Full slash
-todo!();
-}
-// TODO: Slash
-return None;
-}
-// If the attempt is lesser than the blockchain's, slash
-let curr_attempt = TributaryDb::<D>::attempt(&txn, tributary.genesis(), id);
-if attempt < curr_attempt {
-// TODO: Slash for being late
-return None;
-}
-if attempt > curr_attempt {
-// TODO: Full slash
-todo!();
-}
-// TODO: We can also full slash if shares before all commitments, or share before the
-// necessary preprocesses
-// Store this data
-let received = TributaryDb::<D>::set_data(
-label,
-&mut txn,
-tributary.genesis(),
-id,
-attempt,
-signed.signer,
-&bytes,
-);
-// If we have all the needed commitments/preprocesses/shares, tell the processor
-// TODO: This needs to be coded by weight, not by validator count
-if received == needed {
-let mut data = HashMap::new();
-for validator in spec.validators().iter().map(|validator| validator.0) {
-data.insert(
-spec.i(validator).unwrap(),
-if validator == signed.signer {
-bytes.split_off(0)
-} else if let Some(data) =
-TributaryDb::<D>::data(label, &txn, tributary.genesis(), id, attempt, validator)
-{
-data
-} else {
-continue;
-},
-);
-}
-assert_eq!(data.len(), usize::from(needed));
-return Some(data);
-}
-None
-};
+let mut handle = |zone: Zone,
+label,
+needed,
+id,
+attempt,
+mut bytes: Vec<u8>,
+signed: Signed| {
+if zone == Zone::Dkg {
+// Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32]
+assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID");
+} else if !TributaryDb::<D>::recognized_id(&txn, zone.label(), genesis, id) {
+// TODO: Full slash
+todo!();
+}
+// If they've already published a TX for this attempt, slash
+if let Some(data) = TributaryDb::<D>::data(label, &txn, genesis, id, attempt, signed.signer)
+{
+if data != bytes {
+// TODO: Full slash
+todo!();
+}
+// TODO: Slash
+return None;
+}
+// If the attempt is lesser than the blockchain's, slash
+let curr_attempt = TributaryDb::<D>::attempt(&txn, genesis, id);
+if attempt < curr_attempt {
+// TODO: Slash for being late
+return None;
+}
+if attempt > curr_attempt {
+// TODO: Full slash
+todo!();
+}
+// TODO: We can also full slash if shares before all commitments, or share before the
+// necessary preprocesses
+// Store this data
+let received =
+TributaryDb::<D>::set_data(label, &mut txn, genesis, id, attempt, signed.signer, &bytes);
+// If we have all the needed commitments/preprocesses/shares, tell the processor
+// TODO: This needs to be coded by weight, not by validator count
+if received == needed {
+let mut data = HashMap::new();
+for validator in spec.validators().iter().map(|validator| validator.0) {
+data.insert(
+spec.i(validator).unwrap(),
+if validator == signed.signer {
+bytes.split_off(0)
+} else if let Some(data) =
+TributaryDb::<D>::data(label, &txn, genesis, id, attempt, validator)
+{
+data
+} else {
+continue;
+},
+);
+}
+assert_eq!(data.len(), usize::from(needed));
+return Some(data);
+}
+None
+};
 match tx {
 Transaction::DkgCommitments(attempt, bytes, signed) => {
@@ -177,27 +174,22 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
 // If we didn't provide this transaction, we should halt until we do
 // If we provided a distinct transaction, we should error
 // If we did provide this transaction, we should've set the batch ID for the block
-let batch_id = TributaryDb::<D>::batch_id(&txn, tributary.genesis(), block).expect(
+let batch_id = TributaryDb::<D>::batch_id(&txn, genesis, block).expect(
 "synced a tributary block finalizing a external block in a provided transaction \
 despite us not providing that transaction",
 );
-TributaryDb::<D>::recognize_id(
-&mut txn,
-Zone::Batch.label(),
-tributary.genesis(),
-batch_id,
-);
+TributaryDb::<D>::recognize_id(&mut txn, Zone::Batch.label(), genesis, batch_id);
 }
 Transaction::SubstrateBlock(block) => {
-let plan_ids = TributaryDb::<D>::plan_ids(&txn, tributary.genesis(), block).expect(
+let plan_ids = TributaryDb::<D>::plan_ids(&txn, genesis, block).expect(
 "synced a tributary block finalizing a substrate block in a provided transaction \
 despite us not providing that transaction",
 );
 for id in plan_ids {
-TributaryDb::<D>::recognize_id(&mut txn, Zone::Sign.label(), tributary.genesis(), id);
+TributaryDb::<D>::recognize_id(&mut txn, Zone::Sign.label(), genesis, id);
 }
 }
@@ -290,18 +282,19 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
 // TODO: Trigger any necessary re-attempts
 }
-pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
+pub async fn handle_new_blocks<D: Db, Pro: Processor>(
 db: &mut TributaryDb<D>,
 key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
 processor: &mut Pro,
 spec: &TributarySpec,
-tributary: &Tributary<D, Transaction, P>,
+tributary: &TributaryReader<D, Transaction>,
 ) {
-let mut last_block = db.last_block(tributary.genesis());
+let genesis = tributary.genesis();
+let mut last_block = db.last_block(genesis);
 while let Some(next) = tributary.block_after(&last_block) {
 let block = tributary.block(&next).unwrap();
-handle_block(db, key, processor, spec, tributary, block).await;
+handle_block(db, key, processor, spec, block).await;
 last_block = next;
-db.set_last_block(tributary.genesis(), next);
+db.set_last_block(genesis, next);
 }
 }

View File

@@ -29,16 +29,14 @@ impl<D: Db, T: Transaction> Blockchain<D, T> {
 fn block_number_key(&self) -> Vec<u8> {
 D::key(b"tributary_blockchain", b"block_number", self.genesis)
 }
-fn block_key(hash: &[u8; 32]) -> Vec<u8> {
-// Since block hashes incorporate their parent, and the first parent is the genesis, this is
-// fine not incorporating the hash unless there's a hash collision
-D::key(b"tributary_blockchain", b"block", hash)
+fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
+D::key(b"tributary_blockchain", b"block", [genesis, hash].concat())
 }
-fn commit_key(hash: &[u8; 32]) -> Vec<u8> {
-D::key(b"tributary_blockchain", b"commit", hash)
+fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
+D::key(b"tributary_blockchain", b"commit", [genesis, hash].concat())
 }
-fn block_after_key(hash: &[u8; 32]) -> Vec<u8> {
-D::key(b"tributary_blockchain", b"block_after", hash)
+fn block_after_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
+D::key(b"tributary_blockchain", b"block_after", [genesis, hash].concat())
 }
 fn next_nonce_key(&self, signer: &<Ristretto as Ciphersuite>::G) -> Vec<u8> {
 D::key(
@@ -95,21 +93,21 @@ impl<D: Db, T: Transaction> Blockchain<D, T> {
 self.block_number
 }
-pub(crate) fn block_from_db(db: &D, block: &[u8; 32]) -> Option<Block<T>> {
-db.get(Self::block_key(block))
+pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {
+db.get(Self::block_key(&genesis, block))
 .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())
 }
-pub(crate) fn commit_from_db(db: &D, block: &[u8; 32]) -> Option<Vec<u8>> {
-db.get(Self::commit_key(block))
+pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {
+db.get(Self::commit_key(&genesis, block))
 }
 pub(crate) fn commit(&self, block: &[u8; 32]) -> Option<Vec<u8>> {
-Self::commit_from_db(self.db.as_ref().unwrap(), block)
+Self::commit_from_db(self.db.as_ref().unwrap(), self.genesis, block)
 }
-pub(crate) fn block_after(db: &D, block: &[u8; 32]) -> Option<[u8; 32]> {
-db.get(Self::block_after_key(block)).map(|bytes| bytes.try_into().unwrap())
+pub(crate) fn block_after(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<[u8; 32]> {
+db.get(Self::block_after_key(&genesis, block)).map(|bytes| bytes.try_into().unwrap())
 }
 pub(crate) fn add_transaction(&mut self, internal: bool, tx: T) -> bool {
@@ -162,10 +160,10 @@ impl<D: Db, T: Transaction> Blockchain<D, T> {
 self.block_number += 1;
 txn.put(self.block_number_key(), self.block_number.to_le_bytes());
-txn.put(Self::block_key(&self.tip), block.serialize());
-txn.put(Self::commit_key(&self.tip), commit);
-txn.put(Self::block_after_key(&block.parent()), block.hash());
+txn.put(Self::block_key(&self.genesis, &self.tip), block.serialize());
+txn.put(Self::commit_key(&self.genesis, &self.tip), commit);
+txn.put(Self::block_after_key(&self.genesis, &block.parent()), block.hash());
 for tx in &block.transactions {
 match tx.kind() {

View File

@@ -1,4 +1,4 @@
-use core::fmt::Debug;
+use core::{marker::PhantomData, fmt::Debug};
 use std::{sync::Arc, io};
 use async_trait::async_trait;
@@ -150,24 +150,8 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
 self.network.blockchain.read().await.tip()
 }
-// Since these values are static, they can be safely read from the database without lock
-// acquisition
-pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
-Blockchain::<D, T>::block_from_db(&self.db, hash)
-}
-pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
-Blockchain::<D, T>::commit_from_db(&self.db, hash)
-}
-pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
-self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
-}
-pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
-Blockchain::<D, T>::block_after(&self.db, hash)
-}
-pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
-self
-.commit(hash)
-.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
+pub fn reader(&self) -> TributaryReader<D, T> {
+TributaryReader(self.db.clone(), self.genesis, PhantomData)
 }
 pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
@@ -266,3 +250,30 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
 }
 }
 }
+#[derive(Clone)]
+pub struct TributaryReader<D: Db, T: Transaction>(D, [u8; 32], PhantomData<T>);
+impl<D: Db, T: Transaction> TributaryReader<D, T> {
+pub fn genesis(&self) -> [u8; 32] {
+self.1
+}
+// Since these values are static, they can be safely read from the database without lock
+// acquisition
+pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
+Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
+}
+pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
+Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
+}
+pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
+self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
+}
+pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
+Blockchain::<D, T>::block_after(&self.0, self.1, hash)
+}
+pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
+self
+.commit(hash)
+.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
+}
+}

View File

@@ -43,7 +43,7 @@ fn block_addition() {
 assert_eq!(blockchain.tip(), block.hash());
 assert_eq!(blockchain.block_number(), 1);
 assert_eq!(
-Blockchain::<MemDb, SignedTransaction>::block_after(&db, &block.parent()).unwrap(),
+Blockchain::<MemDb, SignedTransaction>::block_after(&db, genesis, &block.parent()).unwrap(),
 block.hash()
 );
 }