Update serai-abi, and dependencies, to patch-polkadot-sdk
@@ -22,13 +22,13 @@ workspace = true
 borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"] }

 bitvec = { version = "1", default-features = false, features = ["alloc"] }
-sp-core = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
+sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "799658329bf66a829e67b34a0e86376b63eb7d23", default-features = false }

 serde = { version = "1", default-features = false, features = ["derive"], optional = true }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"], optional = true }
 scale-info = { version = "2", default-features = false, features = ["derive"], optional = true }
-sp-runtime = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false, features = ["serde"], optional = true }
-frame-support = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false, optional = true }
+sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "799658329bf66a829e67b34a0e86376b63eb7d23", default-features = false, features = ["serde"], optional = true }
+frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "799658329bf66a829e67b34a0e86376b63eb7d23", default-features = false, optional = true }

 serai-primitives = { path = "../primitives", version = "0.1", default-features = false }

@@ -47,6 +47,6 @@ std = [

   "serai-primitives/std",
 ]
-substrate = ["serde", "scale", "scale-info", "sp-runtime", "frame-support", "serai-primitives/non_canonical_scale_derivations"]
+substrate = ["serde", "scale", "scale-info", "sp-runtime", "frame-support", "serai-primitives/serde", "serai-primitives/non_canonical_scale_derivations"]
 try-runtime = ["sp-runtime/try-runtime"]
 default = ["std"]
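The `substrate` feature now also forwards `serai-primitives/serde` alongside the optional Substrate dependencies. A minimal sketch of how such a feature typically gates the Substrate-specific code (an assumption about the wiring, not the crate's actual layout):

```rust
// Sketch only: optional dependencies enabled by the `substrate` feature are
// referenced solely behind a cfg gate, so `default = ["std"]` builds never
// compile them.
#[cfg(feature = "substrate")]
mod substrate {
  use scale::{Decode, Encode};
  use scale_info::TypeInfo;

  // Illustrative type, not part of the real ABI.
  #[derive(Encode, Decode, TypeInfo)]
  pub struct Example(pub u64);
}
```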
@@ -105,7 +105,7 @@ pub struct Block {
 mod substrate {
   use core::fmt::Debug;

-  use scale::{Encode, Decode};
+  use scale::{Encode, Decode, DecodeWithMemTracking, IoReader};
   use scale_info::TypeInfo;

   use sp_core::H256;
@@ -116,6 +116,31 @@ mod substrate {

   use super::*;

+  // Add `serde` implementations which treat self as a `Vec<u8>`
+  impl sp_core::serde::Serialize for Transaction {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+      S: sp_core::serde::Serializer,
+    {
+      <Vec<u8> as sp_core::serde::Serialize>::serialize(&self.encode(), serializer)
+    }
+  }
+  impl<'de> sp_core::serde::Deserialize<'de> for Transaction {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+      D: sp_core::serde::Deserializer<'de>,
+    {
+      use sp_core::serde::de::Error;
+      let bytes = <Vec<u8> as sp_core::serde::Deserialize>::deserialize(deserializer)?;
+      let mut reader = bytes.as_slice();
+      let block = Self::decode(&mut IoReader(&mut reader)).map_err(D::Error::custom)?;
+      if !reader.is_empty() {
+        Err(D::Error::custom("extraneous bytes at end"))?;
+      }
+      Ok(block)
+    }
+  }
+
   /// The digest for all of the Serai-specific header fields added before execution of the block.
   #[derive(Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
   pub struct SeraiPreExecutionDigest {
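The impls added above make serde treat a `Transaction` as its SCALE encoding: `Serialize` writes `self.encode()` as a `Vec<u8>`, and `Deserialize` decodes those bytes and rejects any trailing data. A standalone sketch of the same pattern on a hypothetical `Payload` type (a plain byte slice stands in for `IoReader`; `scale` is the `parity-scale-codec` rename from the Cargo.toml above):

```rust
use scale::{Decode, Encode};
use serde::{de::Error as _, Deserialize, Deserializer, Serialize, Serializer};

// Hypothetical type, not part of serai-abi.
#[derive(Encode, Decode, PartialEq, Debug)]
struct Payload {
  calls: Vec<u32>,
}

impl Serialize for Payload {
  fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
    // serde only ever sees the SCALE encoding
    self.encode().serialize(serializer)
  }
}

impl<'de> Deserialize<'de> for Payload {
  fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
    let bytes = Vec::<u8>::deserialize(deserializer)?;
    let mut slice = bytes.as_slice();
    let payload = Payload::decode(&mut slice).map_err(D::Error::custom)?;
    // Reject extraneous bytes so the byte representation stays one-to-one
    if !slice.is_empty() {
      return Err(D::Error::custom("extraneous bytes at end"));
    }
    Ok(payload)
  }
}
```

With `serde_json`, for instance, `Payload { calls: vec![1, 2] }` round-trips through a JSON array of bytes, and appending garbage to that array makes deserialization fail.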
@@ -149,7 +174,18 @@ mod substrate {
   ///
   /// This is not considered part of the protocol proper and may be pruned in the future. It's
   /// solely considered used for consensus now.
-  #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, sp_runtime::Serialize)]
+  #[derive(
+    Clone,
+    PartialEq,
+    Eq,
+    Debug,
+    Encode,
+    Decode,
+    DecodeWithMemTracking,
+    TypeInfo,
+    sp_runtime::Serialize,
+    sp_runtime::Deserialize,
+  )]
   pub struct ConsensusV1 {
     /// The hash of the immediately preceding block.
     parent_hash: H256,
@@ -164,14 +200,37 @@ mod substrate {
   }

   /// A V1 header for a block, as needed by Substrate.
-  #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, sp_runtime::Serialize)]
+  #[derive(
+    Clone,
+    PartialEq,
+    Eq,
+    Debug,
+    Encode,
+    Decode,
+    DecodeWithMemTracking,
+    TypeInfo,
+    sp_runtime::Serialize,
+    sp_runtime::Deserialize,
+  )]
   pub struct SubstrateHeaderV1 {
     number: u64,
     consensus: ConsensusV1,
   }

   /// A header for a block, as needed by Substrate.
-  #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, sp_runtime::Serialize)]
+  #[derive(
+    Clone,
+    PartialEq,
+    Eq,
+    Debug,
+    Encode,
+    Decode,
+    DecodeWithMemTracking,
+    TypeInfo,
+    sp_runtime::Serialize,
+    sp_runtime::Deserialize,
+  )]
   #[allow(clippy::cast_possible_truncation)]
   pub enum SubstrateHeader {
     /// A version 1 header.
     V1(SubstrateHeaderV1),
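These derives (and the one on `ConsensusV1` above) go multi-line because two traits are added: `DecodeWithMemTracking`, which is why the `scale` import list grew earlier, and `sp_runtime::Deserialize`. `DecodeWithMemTracking` is a newer `parity-scale-codec` trait; the patched polkadot-sdk appears to expect it on these runtime-facing types, which would explain this commit adding it. When every field already implements the trait, the derive is mechanical, as in this illustrative sketch:

```rust
use scale::{Decode, DecodeWithMemTracking, Encode};

// Illustrative type only; the real headers also derive TypeInfo and the
// sp_runtime serde traits shown in the diff above.
#[derive(Encode, Decode, DecodeWithMemTracking)]
struct ExampleHeader {
  number: u64,
  parent_number: u64,
}
```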
@@ -226,12 +285,34 @@ mod substrate {
   }

   /// A block, as needed by Substrate.
-  #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, sp_runtime::Serialize)]
+  #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)]
   pub struct SubstrateBlock {
     header: SubstrateHeader,
-    #[serde(skip)] // This makes this unsafe to deserialize, but we don't impl `Deserialize`
     transactions: Vec<Transaction>,
   }
+  impl sp_core::serde::Serialize for SubstrateBlock {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+      S: sp_core::serde::Serializer,
+    {
+      <Vec<u8> as sp_core::serde::Serialize>::serialize(&self.encode(), serializer)
+    }
+  }
+  impl<'de> sp_core::serde::Deserialize<'de> for SubstrateBlock {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+      D: sp_core::serde::Deserializer<'de>,
+    {
+      use sp_core::serde::de::Error;
+      let bytes = <Vec<u8> as sp_core::serde::Deserialize>::deserialize(deserializer)?;
+      let mut reader = bytes.as_slice();
+      let block = Self::decode(&mut IoReader(&mut reader)).map_err(D::Error::custom)?;
+      if !reader.is_empty() {
+        Err(D::Error::custom("extraneous bytes at end"))?;
+      }
+      Ok(block)
+    }
+  }

   impl HeaderTrait for SubstrateHeader {
     type Number = u64;
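Previously `SubstrateBlock` derived `Serialize` while skipping `transactions`, and provided no `Deserialize` at all, so any serde round trip was lossy. Serializing the whole block as its SCALE encoding fixes that and makes `Deserialize` safe to provide. A self-contained illustration of the old problem, using a stand-in `Block` type and `serde_json` purely for the demo:

```rust
use serde::{Deserialize, Serialize};

// Stand-in type, not the real SubstrateBlock.
#[derive(Serialize, Deserialize)]
struct Block {
  number: u64,
  #[serde(skip)] // skipped fields fall back to Default on deserialization
  transactions: Vec<Vec<u8>>,
}

fn main() {
  let block = Block { number: 1, transactions: vec![vec![0xff]] };
  let json = serde_json::to_string(&block).unwrap();
  let back: Block = serde_json::from_str(&json).unwrap();
  // The transactions were silently dropped by the round trip.
  assert!(back.transactions.is_empty());
}
```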
@@ -166,17 +166,20 @@ impl BorshDeserialize for Transaction {
     if len == 0 {
       let call = Call::deserialize_reader(reader)?;
       if call.is_signed() {
+        #[allow(clippy::io_other_error)]
         Err(io::Error::new(io::ErrorKind::Other, "call was signed but marked unsigned"))?;
       }
       Ok(Transaction::Unsigned { call: UnsignedCall(call) })
     } else {
       if u32::from(len) > MAX_CALLS {
+        #[allow(clippy::io_other_error)]
         Err(io::Error::new(io::ErrorKind::Other, "too many calls"))?;
       }
       let mut calls = BoundedVec::with_bounded_capacity(len.into());
       for _ in 0 .. len {
         let call = Call::deserialize_reader(reader)?;
         if !call.is_signed() {
+          #[allow(clippy::io_other_error)]
           Err(io::Error::new(io::ErrorKind::Other, "call was unsigned but included as signed"))?;
         }
         calls.try_push(call).unwrap();
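The only change in this hunk is the `#[allow(clippy::io_other_error)]` attributes. That lint asks for `io::Error::other(msg)` (stable since Rust 1.74) instead of `io::Error::new(io::ErrorKind::Other, msg)`; the commit silences it rather than rewriting the existing `borsh::io` error construction. Both forms, shown on a hypothetical std-only helper:

```rust
use std::io;

// Mirrors the pattern the lint fires on, plus the `allow` that silences it.
fn check_flagged(len: u32, max: u32) -> io::Result<()> {
  if len > max {
    #[allow(clippy::io_other_error)]
    Err(io::Error::new(io::ErrorKind::Other, "too many calls"))?;
  }
  Ok(())
}

// What clippy suggests instead.
fn check_preferred(len: u32, max: u32) -> io::Result<()> {
  if len > max {
    return Err(io::Error::other("too many calls"));
  }
  Ok(())
}
```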
@@ -254,12 +257,14 @@ mod substrate {
     fn read(&mut self, buf: &mut [u8]) -> borsh::io::Result<usize> {
       let remaining_len = self.0.remaining_len().map_err(|err| {
         self.1 = Some(err);
+        #[allow(clippy::io_other_error)]
         borsh::io::Error::new(borsh::io::ErrorKind::Other, "")
       })?;
       // If we're still calling `read`, we try to read at least one more byte
       let to_read = buf.len().min(remaining_len.unwrap_or(1));
       self.0.read(&mut buf[.. to_read]).map_err(|err| {
         self.1 = Some(err);
+        #[allow(clippy::io_other_error)]
         borsh::io::Error::new(borsh::io::ErrorKind::Other, "")
       })?;
       Ok(to_read)
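For context, this hunk sits inside an adapter that lets borsh read from a SCALE `Input`: when the `Input` doesn't know its remaining length, the adapter still attempts to read one byte, and the SCALE error is stashed so the caller can surface it afterwards. A standalone sketch of that adapter pattern under `std` (where `borsh::io::Read` is `std::io::Read`); the names are illustrative, not the crate's:

```rust
use scale::Input;

// Expose a SCALE `Input` as `std::io::Read`, remembering the SCALE error.
struct InputAsRead<'a, I: Input>(&'a mut I, Option<scale::Error>);

impl<'a, I: Input> std::io::Read for InputAsRead<'a, I> {
  fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
    let remaining_len = self.0.remaining_len().map_err(|err| {
      self.1 = Some(err);
      std::io::Error::other("scale input error")
    })?;
    // If the `Input` can't report how much is left, still try one byte,
    // mirroring the "read at least one more byte" logic above.
    let to_read = buf.len().min(remaining_len.unwrap_or(1));
    self.0.read(&mut buf[.. to_read]).map_err(|err| {
      self.1 = Some(err);
      std::io::Error::other("scale input error")
    })?;
    Ok(to_read)
  }
}
```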