Replace bincode with borsh (#452)

* Add SignalsConfig to chain_spec

* Correct multiexp feature flagging for rand_core std

* Remove bincode for borsh

Replaces a non-canonical encoding with a canonical encoding which should
additionally be faster.

Also fixes an issue where we used bincode in transcripts, where its
non-canonical encoding cannot be trusted (see the sketch after the change list
below).

Unfortunately, this ended up fixing a myriad of other observed bugs as well.
Accordingly, it either has to be merged or the bug fixes from it must be ported
to a new PR.

* Make serde optional, minimize usage

* Make borsh an optional dependency of substrate/ crates

* Remove unused dependencies

* Use [u8; 64] where possible in the processor messages

* Correct borsh feature flagging
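
The canonical-encoding claim above is that borsh gives a value exactly one,
deterministic byte representation. A minimal round-trip sketch, assuming borsh 1
with the "derive" feature; the KeyShare type is a hypothetical stand-in, not one
of the repository's actual message types:

use borsh::{BorshDeserialize, BorshSerialize};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
struct KeyShare {
  participant: u16,
  share: Vec<u8>,
}

fn main() {
  let share = KeyShare { participant: 1, share: vec![0xff; 32] };
  // Deterministic encoding: serializing the same value always yields identical bytes,
  // so the bytes written into a transcript are the bytes every honest party derives.
  let bytes = borsh::to_vec(&share).unwrap();
  assert_eq!(bytes, borsh::to_vec(&share).unwrap());
  // Round-trip back to the original value.
  let decoded: KeyShare = borsh::from_slice(&bytes).unwrap();
  assert_eq!(decoded, share);
}
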
Author: Luke Parker
Committed by: GitHub
Date: 2023-11-25 04:01:11 -05:00
Parent: 6b2876351e
Commit: b296be8515
52 changed files with 468 additions and 309 deletions


@@ -41,9 +41,7 @@ sp-application-crypto = { git = "https://github.com/serai-dex/substrate", defaul
 serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
-bincode = { version = "1", default-features = false }
-serde = "1"
-serde_json = { version = "1", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
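
The coordinator manifest above enables borsh unconditionally, while per the
commit message the substrate/ crates only pull it in as an optional,
feature-gated dependency. A hedged sketch of that pattern, not copied from the
repository; the manifest lines in the comment and the ExampleEvent type are
assumptions:

// Cargo.toml (assumed shape for an optional borsh dependency):
//   borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
//   [features]
//   borsh = ["dep:borsh"]
//   std = ["borsh?/std"]

// Only derive the borsh traits when the "borsh" feature is enabled.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
pub struct ExampleEvent {
  pub session: u32,
  pub key: Vec<u8>,
}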


@@ -21,7 +21,7 @@ use serai_env as env;
 use scale::Encode;
 use serai_client::{
   primitives::NetworkId,
-  validator_sets::primitives::{Session, ValidatorSet},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
   Public, Serai, SeraiInInstructions,
 };
@@ -501,7 +501,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
        &mut txn,
        key,
        spec,
-       &(Public(substrate_key), network_key.try_into().unwrap()),
+       &KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
        id.attempt,
      );
@@ -587,7 +587,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
      vec![Transaction::SubstratePreprocess(SignData {
        plan: id.id,
        attempt: id.attempt,
-       data: preprocesses,
+       data: preprocesses.into_iter().map(Into::into).collect(),
        signed: Transaction::empty_signed(),
      })]
    }
@@ -612,7 +612,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
        };
        id.encode()
      },
-     preprocesses,
+     preprocesses.into_iter().map(Into::into).collect(),
    );
    let intended = Transaction::Batch(
@@ -681,7 +681,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
      vec![Transaction::SubstratePreprocess(SignData {
        plan: id.id,
        attempt: id.attempt,
-       data: preprocesses,
+       data: preprocesses.into_iter().map(Into::into).collect(),
        signed: Transaction::empty_signed(),
      })]
    }


@@ -25,8 +25,8 @@ impl Processors for Arc<MessageQueue> {
    let msg: CoordinatorMessage = msg.into();
    let metadata =
      Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
-   let msg = serde_json::to_string(&msg).unwrap();
-   self.queue(metadata, msg.into_bytes()).await;
+   let msg = borsh::to_vec(&msg).unwrap();
+   self.queue(metadata, msg).await;
  }
  async fn recv(&mut self, network: NetworkId) -> Message {
    let msg = self.next(Service::Processor(network)).await;
@@ -36,7 +36,7 @@ impl Processors for Arc<MessageQueue> {
    // Deserialize it into a ProcessorMessage
    let msg: ProcessorMessage =
-     serde_json::from_slice(&msg.msg).expect("message wasn't a JSON-encoded ProcessorMessage");
+     borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage");
    return Message { id, network, msg };
  }
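
The two hunks above change the message queue's wire format from JSON strings to
raw borsh bytes. A small sketch of the resulting send/recv pattern with a
stand-in QueueMessage enum (hypothetical; it stands in for the
CoordinatorMessage and ProcessorMessage types used above):

use borsh::{BorshDeserialize, BorshSerialize};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
enum QueueMessage {
  KeyGen { attempt: u32 },
  Sign { plan: [u8; 32] },
}

fn main() {
  let msg = QueueMessage::Sign { plan: [0; 32] };
  // Send side: borsh::to_vec already returns Vec<u8>, so the into_bytes() step disappears.
  let wire = borsh::to_vec(&msg).unwrap();
  // Recv side: from_slice rejects malformed input instead of silently accepting it.
  let decoded: QueueMessage = borsh::from_slice(&wire).unwrap();
  assert_eq!(decoded, msg);
  // Trailing bytes are also rejected, keeping the decoding strict.
  let mut trailing = wire.clone();
  trailing.push(0);
  assert!(borsh::from_slice::<QueueMessage>(&trailing).is_err());
}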


@@ -10,6 +10,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::Participant;
 use sp_runtime::traits::Verify;
+use serai_client::validator_sets::primitives::KeyPair;
 use tokio::time::sleep;
@@ -279,7 +280,7 @@ async fn dkg_test() {
    OsRng.fill_bytes(&mut substrate_key);
    let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
    OsRng.fill_bytes(&mut network_key);
-   let key_pair = (serai_client::Public(substrate_key), network_key.try_into().unwrap());
+   let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());
    let mut txs = vec![];
    for (i, key) in keys.iter().enumerate() {


@@ -72,7 +72,7 @@ pub fn error_generating_key_pair<G: Get>(
  // Sign a key pair which can't be valid
  // (0xff used as 0 would be the Ristretto identity point, 0-length for the network key)
- let key_pair = (Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
+ let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
  match DkgConfirmer::share(spec, key, attempt, preprocesses, &key_pair) {
    Ok(mut share) => {
      // Zeroize the share to ensure it's not accessed
@@ -312,7 +312,7 @@ pub(crate) async fn handle_application_tx<
      }
      let to = Participant::new(to).unwrap();
-     DkgShare::set(txn, genesis, from.into(), to.into(), &share);
+     DkgShare::set(txn, genesis, from.into(), to.into(), share);
    }
  }
}
@@ -556,9 +556,16 @@ pub(crate) async fn handle_application_tx<
      }
      Transaction::SubstratePreprocess(data) => {
-       let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
+       let signer = data.signed.signer;
+       let Ok(_) = check_sign_data_len::<D>(txn, spec, signer, data.data.len()) else {
          return;
        };
+       for data in &data.data {
+         if data.len() != 64 {
+           fatal_slash::<D>(txn, genesis, signer.to_bytes(), "non-64-byte Substrate preprocess");
+           return;
+         }
+       }
        match handle(
          txn,
          &DataSpecification {
@@ -578,7 +585,10 @@ pub(crate) async fn handle_application_tx<
          spec.set().network,
          coordinator::CoordinatorMessage::SubstratePreprocesses {
            id: SubstrateSignId { key, id: data.plan, attempt: data.attempt },
-           preprocesses,
+           preprocesses: preprocesses
+             .into_iter()
+             .map(|(k, v)| (k, v.try_into().unwrap()))
+             .collect(),
          },
        )
        .await;
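
The new length check and the (k, v.try_into().unwrap()) conversion above lean
on std's fallible Vec-to-array conversion: once every preprocess has been
verified to be exactly 64 bytes, the unwrap cannot fail. A stand-in sketch (the
function is hypothetical, not code from the repository):

// TryFrom<Vec<u8>> for [u8; 64] only succeeds when the length is exactly 64,
// which is why the handler fatally slashes on any non-64-byte Substrate preprocess first.
fn to_fixed_preprocess(preprocess: Vec<u8>) -> Option<[u8; 64]> {
  preprocess.try_into().ok()
}

fn main() {
  assert!(to_fixed_preprocess(vec![0u8; 64]).is_some());
  assert!(to_fixed_preprocess(vec![0u8; 63]).is_none());
}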