refactor: clean up (#78)
* refactor client rpc
* refactor node
* remove unused deps
* remove unused import
* refactor consensus
* consensus refactor
* rename rpc traits
* refactor execution
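A central piece of the "rename rpc traits" change is the split of the shared `Rpc` trait into consensus- and execution-specific traits. A minimal sketch of the resulting trait shapes, pieced together from the hunks below; only the methods visible in the hunks are shown, and the simplified return types (plain byte vectors instead of the crate's `Bootstrap`/`Update` types) are assumptions made so the sketch stands alone:

use async_trait::async_trait;
use eyre::Result;

// Consensus-side RPC, implemented by NimbusRpc and MockRpc in the diff.
#[async_trait]
pub trait ConsensusRpc {
    fn new(path: &str) -> Self;
    async fn get_bootstrap(&self, block_root: &Vec<u8>) -> Result<Vec<u8>>; // placeholder return type
    async fn get_updates(&self, period: u64) -> Result<Vec<Vec<u8>>>; // placeholder return type
}

// Execution-side RPC, implemented by HttpRpc and MockRpc in the diff.
#[async_trait]
pub trait ExecutionRpc: Send + Clone + 'static {
    fn new(rpc: &str) -> Result<Self>
    where
        Self: Sized;
}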
parent d06fb73803
commit a9b34f3dee
@@ -446,7 +446,6 @@ dependencies = [
"hex",
"jsonrpsee",
"log",
"openssl",
"reqwest",
"revm",
"serde",
@@ -519,11 +518,9 @@ dependencies = [
"ethers",
"eyre",
"hex",
"openssl",
"serde",
"ssz-rs",
"thiserror",
"toml",
]

[[package]]
@@ -552,13 +549,9 @@ dependencies = [
"config",
"ethers",
"eyre",
"futures",
"hex",
"jsonrpsee",
"log",
"openssl",
"reqwest",
"revm",
"serde",
"serde_json",
"ssz-rs",
@@ -1044,9 +1037,7 @@ dependencies = [
"eyre",
"futures",
"hex",
"jsonrpsee",
"log",
"openssl",
"reqwest",
"revm",
"serde",
@@ -2231,15 +2222,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"

[[package]]
name = "openssl-src"
version = "111.22.0+1.1.1q"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853"
dependencies = [
"cc",
]

[[package]]
name = "openssl-sys"
version = "0.9.75"
@@ -2249,7 +2231,6 @@ dependencies = [
"autocfg",
"cc",
"libc",
"openssl-src",
"pkg-config",
"vcpkg",
]

@@ -20,7 +20,6 @@ bytes = "1.2.1"
futures = "0.3.23"
toml = "0.5.9"
log = "0.4.17"
openssl = { version = "0.10", features = ["vendored"] }

common = { path = "../common" }
consensus = { path = "../consensus" }

@@ -90,6 +90,8 @@ impl Node {
self.payloads.pop_first();
}

// only save one finalized block per epoch
// finality updates only occur on epoch boundries
while self.finalized_payloads.len() > usize::max(self.history_size / 32, 1) {
self.finalized_payloads.pop_first();
}
@@ -172,6 +174,7 @@ impl Node {
.await
}

// assumes tip of 1 gwei to prevent having to prove out every tx in the block
pub fn get_gas_price(&self) -> Result<U256> {
self.check_head_age()?;

@@ -181,6 +184,7 @@ impl Node {
Ok(base_fee + tip)
}

// assumes tip of 1 gwei to prevent having to prove out every tx in the block
pub fn get_priority_fee(&self) -> Result<U256> {
let tip = U256::from(10_u64.pow(9));
Ok(tip)
@@ -221,13 +225,13 @@ impl Node {
.filter(|entry| &entry.1.block_hash.to_vec() == hash)
.collect::<Vec<(&u64, &ExecutionPayload)>>();

match payloads.get(0) {
Some(payload_entry) => self
.execution
if let Some(payload_entry) = payloads.get(0) {
self.execution
.get_block(payload_entry.1, full_tx)
.await
.map(|b| Some(b)),
None => Ok(None),
.map(|b| Some(b))
} else {
Ok(None)
}
}

@@ -3,7 +3,7 @@ use ethers::{
types::{Address, Transaction, TransactionReceipt, H256},
};
use eyre::Result;
use log::{debug, info, warn};
use log::{info, warn};
use std::{fmt::Display, net::SocketAddr, str::FromStr, sync::Arc};
use tokio::sync::RwLock;

@@ -108,7 +108,6 @@ struct RpcInner {
#[async_trait]
impl EthRpcServer for RpcInner {
async fn get_balance(&self, address: &str, block: BlockTag) -> Result<String, Error> {
debug!("eth_getBalance");
let address = convert_err(Address::from_str(address))?;
let node = self.node.read().await;
let balance = convert_err(node.get_balance(&address, &block).await)?;
@@ -133,7 +132,6 @@ impl EthRpcServer for RpcInner {
}

async fn call(&self, opts: CallOpts, block: BlockTag) -> Result<String, Error> {
debug!("eth_call");
let node = self.node.read().await;
let res = convert_err(node.call(&opts, &block))?;

@@ -141,7 +139,6 @@ impl EthRpcServer for RpcInner {
}

async fn estimate_gas(&self, opts: CallOpts) -> Result<String, Error> {
debug!("eth_estimateGas");
let node = self.node.read().await;
let gas = convert_err(node.estimate_gas(&opts))?;

@@ -11,6 +11,4 @@ serde = { version = "1.0.143", features = ["derive"] }
hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
ethers = "0.17.0"
toml = "0.5.9"
openssl = { version = "0.10", features = ["vendored"] }
thiserror = "1.0.37"

@@ -15,15 +15,11 @@ hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
blst = "0.3.10"
ethers = "0.17.0"
jsonrpsee = { version = "0.15.1", features = ["full"] }
revm = "1.9.0"
bytes = "1.2.1"
futures = "0.3.23"
toml = "0.5.9"
async-trait = "0.1.57"
log = "0.4.17"
chrono = "0.4.22"
openssl = { version = "0.10", features = ["vendored"] }
thiserror = "1.0.37"

common = { path = "../common" }

@@ -2,8 +2,7 @@ use std::cmp;
use std::sync::Arc;
use std::time::UNIX_EPOCH;

use blst::min_pk::{PublicKey, Signature};
use blst::BLST_ERROR;
use blst::min_pk::PublicKey;
use chrono::Duration;
use eyre::eyre;
use eyre::Result;
@@ -16,18 +15,22 @@ use config::Config;

use crate::errors::ConsensusError;

use super::rpc::Rpc;
use super::rpc::ConsensusRpc;
use super::types::*;
use super::utils::*;

pub struct ConsensusClient<R: Rpc> {
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md
// does not implement force updates

pub struct ConsensusClient<R: ConsensusRpc> {
rpc: R,
store: Store,
store: LightClientStore,
pub last_checkpoint: Option<Vec<u8>>,
pub config: Arc<Config>,
}

#[derive(Debug)]
struct Store {
struct LightClientStore {
finalized_header: Header,
current_sync_committee: SyncCommittee,
next_sync_committee: Option<SyncCommittee>,
@@ -36,7 +39,7 @@ struct Store {
current_max_active_participants: u64,
}

impl<R: Rpc> ConsensusClient<R> {
impl<R: ConsensusRpc> ConsensusClient<R> {
pub async fn new(
rpc: &str,
checkpoint_block_root: &Vec<u8>,
@@ -67,7 +70,7 @@ impl<R: Rpc> ConsensusClient<R> {
return Err(ConsensusError::InvalidCurrentSyncCommitteeProof.into());
}

let store = Store {
let store = LightClientStore {
finalized_header: bootstrap.header.clone(),
current_sync_committee: bootstrap.current_sync_committee,
next_sync_committee: None,
@@ -167,6 +170,8 @@ impl<R: Rpc> ConsensusClient<R> {
Ok(())
}

// implements checks from validate_light_client_update and process_light_client_update in the
// specification
fn verify_generic_update(&self, update: &GenericUpdate) -> Result<()> {
let bits = get_bits(&update.sync_aggregate.sync_committee_bits);
if bits == 0 {
@@ -237,13 +242,13 @@ impl<R: Rpc> ConsensusClient<R> {

let pks =
get_participating_keys(sync_committee, &update.sync_aggregate.sync_committee_bits)?;
let pks: Vec<&PublicKey> = pks.iter().map(|pk| pk).collect();

let header_root =
bytes_to_bytes32(update.attested_header.clone().hash_tree_root()?.as_bytes());
let signing_root = self.compute_committee_sign_root(header_root, update.signature_slot)?;
let sig = &update.sync_aggregate.sync_committee_signature;
let is_valid_sig = is_aggregate_valid(sig, signing_root.as_bytes(), &pks);
let is_valid_sig = self.verify_sync_committee_signture(
&pks,
&update.attested_header,
&update.sync_aggregate.sync_committee_signature,
update.signature_slot,
);

if !is_valid_sig {
return Err(ConsensusError::InvalidSignature.into());
@@ -267,13 +272,15 @@ impl<R: Rpc> ConsensusClient<R> {
self.verify_generic_update(&update)
}

// implements state changes from apply_light_client_update and process_light_client_update in
// the specification
fn apply_generic_update(&mut self, update: &GenericUpdate) {
let committee_bits = get_bits(&update.sync_aggregate.sync_committee_bits);

self.store.current_max_active_participants =
u64::max(self.store.current_max_active_participants, committee_bits);

let should_update_optimistic = committee_bits > self.safety_theshhold()
let should_update_optimistic = committee_bits > self.safety_threshold()
&& update.attested_header.slot > self.store.optimistic_header.slot;

if should_update_optimistic {
@@ -393,13 +400,36 @@ impl<R: Rpc> ConsensusClient<R> {
update.next_sync_committee.is_some() && update.next_sync_committee_branch.is_some()
}

fn safety_theshhold(&self) -> u64 {
fn safety_threshold(&self) -> u64 {
cmp::max(
self.store.current_max_active_participants,
self.store.previous_max_active_participants,
) / 2
}

fn verify_sync_committee_signture(
&self,
pks: &Vec<PublicKey>,
attested_header: &Header,
signature: &SignatureBytes,
signature_slot: u64,
) -> bool {
let res: Result<bool> = (move || {
let pks: Vec<&PublicKey> = pks.iter().map(|pk| pk).collect();
let header_root =
bytes_to_bytes32(attested_header.clone().hash_tree_root()?.as_bytes());
let signing_root = self.compute_committee_sign_root(header_root, signature_slot)?;

Ok(is_aggregate_valid(signature, signing_root.as_bytes(), &pks))
})();

if let Ok(is_valid) = res {
is_valid
} else {
false
}
}

fn compute_committee_sign_root(&self, header: Bytes32, slot: u64) -> Result<Node> {
let genesis_root = self.config.chain.genesis_root.to_vec().try_into().unwrap();

@@ -479,42 +509,12 @@ fn get_bits(bitfield: &Bitvector<512>) -> u64 {
count
}

fn is_aggregate_valid(sig_bytes: &SignatureBytes, msg: &[u8], pks: &[&PublicKey]) -> bool {
let dst: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
let sig_res = Signature::from_bytes(&sig_bytes);
match sig_res {
Ok(sig) => sig.fast_aggregate_verify(true, msg, dst, &pks) == BLST_ERROR::BLST_SUCCESS,
Err(_) => false,
}
}

fn is_finality_proof_valid(
attested_header: &Header,
finality_header: &mut Header,
finality_branch: &Vec<Bytes32>,
) -> bool {
let finality_header_hash_res = finality_header.hash_tree_root();
if finality_header_hash_res.is_err() {
return false;
}

let attested_header_state_root_res = bytes32_to_node(&attested_header.state_root);
if attested_header_state_root_res.is_err() {
return false;
}

let finality_branch_res = branch_to_nodes(finality_branch.clone());
if finality_branch_res.is_err() {
return false;
}

is_valid_merkle_branch(
&finality_header_hash_res.unwrap(),
finality_branch_res.unwrap().iter(),
6,
41,
&attested_header_state_root_res.unwrap(),
)
is_proof_valid(attested_header, finality_header, finality_branch, 6, 41)
}

fn is_next_committee_proof_valid(
@@ -522,27 +522,12 @@ fn is_next_committee_proof_valid(
next_committee: &mut SyncCommittee,
next_committee_branch: &Vec<Bytes32>,
) -> bool {
let next_committee_hash_res = next_committee.hash_tree_root();
if next_committee_hash_res.is_err() {
return false;
}

let attested_header_state_root_res = bytes32_to_node(&attested_header.state_root);
if attested_header_state_root_res.is_err() {
return false;
}

let next_committee_branch_res = branch_to_nodes(next_committee_branch.clone());
if next_committee_branch_res.is_err() {
return false;
}

is_valid_merkle_branch(
&next_committee_hash_res.unwrap(),
next_committee_branch_res.unwrap().iter(),
is_proof_valid(
attested_header,
next_committee,
next_committee_branch,
5,
23,
&attested_header_state_root_res.unwrap(),
)
}

@@ -551,86 +536,15 @@ fn is_current_committee_proof_valid(
current_committee: &mut SyncCommittee,
current_committee_branch: &Vec<Bytes32>,
) -> bool {
let next_committee_hash_res = current_committee.hash_tree_root();
if next_committee_hash_res.is_err() {
return false;
}

let attested_header_state_root_res = bytes32_to_node(&attested_header.state_root);
if attested_header_state_root_res.is_err() {
return false;
}

let next_committee_branch_res = branch_to_nodes(current_committee_branch.clone());
if next_committee_branch_res.is_err() {
return false;
}

is_valid_merkle_branch(
&next_committee_hash_res.unwrap(),
next_committee_branch_res.unwrap().iter(),
is_proof_valid(
attested_header,
current_committee,
current_committee_branch,
5,
22,
&attested_header_state_root_res.unwrap(),
)
}

fn calc_sync_period(slot: u64) -> u64 {
let epoch = slot / 32;
epoch / 256
}

fn branch_to_nodes(branch: Vec<Bytes32>) -> Result<Vec<Node>> {
branch
.iter()
.map(|elem| bytes32_to_node(elem))
.collect::<Result<Vec<Node>>>()
}

#[derive(SimpleSerialize, Default, Debug)]
struct SigningData {
object_root: Bytes32,
domain: Bytes32,
}

#[derive(SimpleSerialize, Default, Debug)]
struct ForkData {
current_version: Vector<u8, 4>,
genesis_validator_root: Bytes32,
}

fn compute_signing_root(object_root: Bytes32, domain: Bytes32) -> Result<Node> {
let mut data = SigningData {
object_root,
domain,
};
Ok(data.hash_tree_root()?)
}

fn compute_domain(
domain_type: &[u8],
fork_version: Vector<u8, 4>,
genesis_root: Bytes32,
) -> Result<Bytes32> {
let fork_data_root = compute_fork_data_root(fork_version, genesis_root)?;
let start = domain_type;
let end = &fork_data_root.as_bytes()[..28];
let d = [start, end].concat();
Ok(d.to_vec().try_into().unwrap())
}

fn compute_fork_data_root(
current_version: Vector<u8, 4>,
genesis_validator_root: Bytes32,
) -> Result<Node> {
let current_version = current_version.try_into()?;
let mut fork_data = ForkData {
current_version,
genesis_validator_root,
};
Ok(fork_data.hash_tree_root()?)
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -640,7 +554,7 @@ mod tests {
use crate::{
consensus::calc_sync_period,
errors::ConsensusError,
rpc::{mock_rpc::MockRpc, Rpc},
rpc::{mock_rpc::MockRpc, ConsensusRpc},
types::Header,
ConsensusClient,
};
@@ -688,7 +602,7 @@ mod tests {
}

#[tokio::test]
async fn test_verify_upadate_invlaid_finality() {
async fn test_verify_update_invalid_finality() {
let client = get_client().await;
let period = calc_sync_period(client.store.finalized_header.slot);
let updates = client.rpc.get_updates(period).await.unwrap();
@@ -730,7 +644,7 @@ mod tests {
}

#[tokio::test]
async fn test_verify_finality_invlaid_finality() {
async fn test_verify_finality_invalid_finality() {
let mut client = get_client().await;
client.sync().await.unwrap();

@@ -745,7 +659,7 @@ mod tests {
}

#[tokio::test]
async fn test_verify_finality_invlaid_sig() {
async fn test_verify_finality_invalid_sig() {
let mut client = get_client().await;
client.sync().await.unwrap();

@@ -4,3 +4,5 @@ pub mod types;

mod consensus;
pub use crate::consensus::*;

mod utils;

@@ -3,7 +3,7 @@ use std::{fs::read_to_string, path::PathBuf};
use async_trait::async_trait;
use eyre::Result;

use super::Rpc;
use super::ConsensusRpc;
use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update};

pub struct MockRpc {
@@ -11,7 +11,7 @@ pub struct MockRpc {
}

#[async_trait]
impl Rpc for MockRpc {
impl ConsensusRpc for MockRpc {
fn new(path: &str) -> Self {
MockRpc {
testdata: PathBuf::from(path),

@@ -6,8 +6,9 @@ use eyre::Result;

use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update};

// implements https://github.com/ethereum/beacon-APIs/tree/master/apis/beacon/light_client
#[async_trait]
pub trait Rpc {
pub trait ConsensusRpc {
fn new(path: &str) -> Self;
async fn get_bootstrap(&self, block_root: &Vec<u8>) -> Result<Bootstrap>;
async fn get_updates(&self, period: u64) -> Result<Vec<Update>>;

@@ -2,7 +2,7 @@ use async_trait::async_trait;
use common::errors::RpcError;
use eyre::Result;

use super::Rpc;
use super::ConsensusRpc;
use crate::types::*;

pub struct NimbusRpc {
@@ -10,7 +10,7 @@ pub struct NimbusRpc {
}

#[async_trait]
impl Rpc for NimbusRpc {
impl ConsensusRpc for NimbusRpc {
fn new(rpc: &str) -> Self {
NimbusRpc {
rpc: rpc.to_string(),

@@ -0,0 +1,97 @@
use blst::{
min_pk::{PublicKey, Signature},
BLST_ERROR,
};
use common::{types::Bytes32, utils::bytes32_to_node};
use eyre::Result;
use ssz_rs::prelude::*;

use crate::types::{Header, SignatureBytes};

pub fn calc_sync_period(slot: u64) -> u64 {
let epoch = slot / 32; // 32 slots per epoch
epoch / 256 // 256 epochs per sync committee
}

pub fn is_aggregate_valid(sig_bytes: &SignatureBytes, msg: &[u8], pks: &[&PublicKey]) -> bool {
let dst: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
let sig_res = Signature::from_bytes(&sig_bytes);
match sig_res {
Ok(sig) => sig.fast_aggregate_verify(true, msg, dst, &pks) == BLST_ERROR::BLST_SUCCESS,
Err(_) => false,
}
}

pub fn is_proof_valid<L: Merkleized>(
attested_header: &Header,
leaf_object: &mut L,
branch: &Vec<Bytes32>,
depth: usize,
index: usize,
) -> bool {
let res: Result<bool> = (move || {
let leaf_hash = leaf_object.hash_tree_root()?;
let state_root = bytes32_to_node(&attested_header.state_root)?;
let branch = branch_to_nodes(branch.to_vec())?;

let is_valid = is_valid_merkle_branch(&leaf_hash, branch.iter(), depth, index, &state_root);
Ok(is_valid)
})();

if let Ok(is_valid) = res {
is_valid
} else {
false
}
}

#[derive(SimpleSerialize, Default, Debug)]
struct SigningData {
object_root: Bytes32,
domain: Bytes32,
}

#[derive(SimpleSerialize, Default, Debug)]
struct ForkData {
current_version: Vector<u8, 4>,
genesis_validator_root: Bytes32,
}

pub fn compute_signing_root(object_root: Bytes32, domain: Bytes32) -> Result<Node> {
let mut data = SigningData {
object_root,
domain,
};
Ok(data.hash_tree_root()?)
}

pub fn compute_domain(
domain_type: &[u8],
fork_version: Vector<u8, 4>,
genesis_root: Bytes32,
) -> Result<Bytes32> {
let fork_data_root = compute_fork_data_root(fork_version, genesis_root)?;
let start = domain_type;
let end = &fork_data_root.as_bytes()[..28];
let d = [start, end].concat();
Ok(d.to_vec().try_into().unwrap())
}

fn compute_fork_data_root(
current_version: Vector<u8, 4>,
genesis_validator_root: Bytes32,
) -> Result<Node> {
let current_version = current_version.try_into()?;
let mut fork_data = ForkData {
current_version,
genesis_validator_root,
};
Ok(fork_data.hash_tree_root()?)
}

pub fn branch_to_nodes(branch: Vec<Bytes32>) -> Result<Vec<Node>> {
branch
.iter()
.map(|elem| bytes32_to_node(elem))
.collect::<Result<Vec<Node>>>()
}

@@ -15,14 +15,12 @@ hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
blst = "0.3.10"
ethers = "0.17.0"
jsonrpsee = { version = "0.15.1", features = ["full"] }
revm = "1.9.0"
bytes = "1.2.1"
futures = "0.3.23"
toml = "0.5.9"
triehash-ethereum = { git = "https://github.com/openethereum/parity-ethereum" }
async-trait = "0.1.57"
openssl = { version = "0.10", features = ["vendored"] }
log = "0.4.17"
thiserror = "1.0.37"

@@ -15,18 +15,18 @@ use tokio::runtime::Runtime;
use consensus::types::ExecutionPayload;

use crate::{
rpc::Rpc,
rpc::ExecutionRpc,
types::{Account, CallOpts},
};

use super::ExecutionClient;

pub struct Evm<R: Rpc> {
pub struct Evm<R: ExecutionRpc> {
evm: EVM<ProofDB<R>>,
chain_id: u64,
}

impl<R: Rpc> Evm<R> {
impl<R: ExecutionRpc> Evm<R> {
pub fn new(execution: ExecutionClient<R>, payload: ExecutionPayload, chain_id: u64) -> Self {
let mut evm: EVM<ProofDB<R>> = EVM::new();
let db = ProofDB::new(execution, payload);
@@ -64,6 +64,7 @@ impl<R: Rpc> Evm<R> {
return Err(eyre::eyre!(err.clone()));
}

// overestimate to avoid out of gas reverts
let gas_scaled = (1.10 * gas as f64) as u64;
Ok(gas_scaled)
}
@@ -158,14 +159,14 @@ impl<R: Rpc> Evm<R> {
}
}

struct ProofDB<R: Rpc> {
struct ProofDB<R: ExecutionRpc> {
execution: ExecutionClient<R>,
payload: ExecutionPayload,
accounts: HashMap<Address, Account>,
error: Option<String>,
}

impl<R: Rpc> ProofDB<R> {
impl<R: ExecutionRpc> ProofDB<R> {
pub fn new(execution: ExecutionClient<R>, payload: ExecutionPayload) -> Self {
ProofDB {
execution,
@@ -205,7 +206,7 @@ impl<R: Rpc> ProofDB<R> {
}
}

impl<R: Rpc> Database for ProofDB<R> {
impl<R: ExecutionRpc> Database for ProofDB<R> {
fn basic(&mut self, address: H160) -> AccountInfo {
if is_precompile(&address) {
return AccountInfo::default();

@@ -18,17 +18,17 @@ use crate::errors::ExecutionError;
use crate::types::Transactions;

use super::proof::{encode_account, verify_proof};
use super::rpc::Rpc;
use super::rpc::ExecutionRpc;
use super::types::{Account, ExecutionBlock};

#[derive(Clone)]
pub struct ExecutionClient<R: Rpc> {
pub struct ExecutionClient<R: ExecutionRpc> {
pub rpc: R,
}

impl<R: Rpc> ExecutionClient<R> {
impl<R: ExecutionRpc> ExecutionClient<R> {
pub fn new(rpc: &str) -> Result<Self> {
let rpc = Rpc::new(rpc)?;
let rpc = ExecutionRpc::new(rpc)?;
Ok(ExecutionClient { rpc })
}

@@ -11,11 +11,10 @@ use ethers::types::{
TransactionReceipt, H256, U256,
};
use eyre::Result;
use log::trace;

use crate::types::CallOpts;

use super::Rpc;
use super::ExecutionRpc;

pub struct HttpRpc {
url: String,
@@ -29,7 +28,7 @@ impl Clone for HttpRpc {
}

#[async_trait]
impl Rpc for HttpRpc {
impl ExecutionRpc for HttpRpc {
fn new(rpc: &str) -> Result<Self> {
let http = Http::from_str(rpc)?;
let mut client = RetryClient::new(http, Box::new(HttpRateLimitRetryPolicy), 100, 250);
@@ -47,7 +46,6 @@ impl Rpc for HttpRpc {
slots: &[H256],
block: u64,
) -> Result<EIP1186ProofResponse> {
trace!("fetching proof");
let block = Some(BlockId::from(block));
let proof_response = self
.provider

@@ -10,7 +10,7 @@ use eyre::{eyre, Result};

use crate::types::CallOpts;

use super::Rpc;
use super::ExecutionRpc;

#[derive(Clone)]
pub struct MockRpc {
@@ -18,7 +18,7 @@ pub struct MockRpc {
}

#[async_trait]
impl Rpc for MockRpc {
impl ExecutionRpc for MockRpc {
fn new(rpc: &str) -> Result<Self> {
let path = PathBuf::from(rpc);
Ok(MockRpc { path })

@@ -11,7 +11,7 @@ pub mod http_rpc;
pub mod mock_rpc;

#[async_trait]
pub trait Rpc: Send + Clone + 'static {
pub trait ExecutionRpc: Send + Clone + 'static {
fn new(rpc: &str) -> Result<Self>
where
Self: Sized;