Compare commits

..

3 Commits

Author SHA1 Message Date
Andreas Bigger adcdddf20c 🏗️ p2p 2022-12-16 05:51:36 -07:00
Andreas Bigger 57887c4c36 Merge branch 'master' into @refcell/p2p 2022-12-14 09:58:22 -07:00
Andreas Bigger 3f52dc0cfa 🚀 p2p setup 2022-12-05 15:59:32 -08:00
67 changed files with 4146 additions and 7178 deletions


@ -1,9 +1,8 @@
name: benchmarks name: benchmarks
on: on:
workflow_bench: push:
# push: branches: [ "master" ]
# branches: [ "master" ]
env: env:
MAINNET_RPC_URL: ${{ secrets.MAINNET_RPC_URL }} MAINNET_RPC_URL: ${{ secrets.MAINNET_RPC_URL }}


@ -6,10 +6,6 @@ on:
pull_request: pull_request:
branches: [ "master" ] branches: [ "master" ]
env:
MAINNET_RPC_URL: ${{ secrets.MAINNET_RPC_URL }}
GOERLI_RPC_URL: ${{ secrets.GOERLI_RPC_URL }}
jobs: jobs:
check: check:
runs-on: ubuntu-latest runs-on: ubuntu-latest

.gitignore (vendored): 8 changed lines

@ -1,7 +1,3 @@
.DS_Store /target
target
*.env
helios-ts/node_modules *.env
helios-ts/dist
helios-ts/helios-*.tgz

Cargo.lock (generated): 4190 changed lines

File diff suppressed because it is too large


@ -1,11 +1,8 @@
[package] [package]
name = "helios" name = "helios"
version = "0.3.0" version = "0.1.2"
edition = "2021" edition = "2021"
autobenches = false autobenches = false
exclude = [
"benches"
]
[workspace] [workspace]
members = [ members = [
@ -15,12 +12,11 @@ members = [
"config", "config",
"consensus", "consensus",
"execution", "execution",
"helios-ts",
] ]
default-members = ["cli"]
[profile.bench] [features]
debug = true default = []
p2p = []
[dependencies] [dependencies]
client = { path = "./client" } client = { path = "./client" }
@ -28,28 +24,20 @@ config = { path = "./config" }
common = { path = "./common" } common = { path = "./common" }
consensus = { path = "./consensus" } consensus = { path = "./consensus" }
execution = { path = "./execution" } execution = { path = "./execution" }
serde = { version = "1.0.154", features = ["derive"] }
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] [dev-dependencies]
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
eyre = "0.6.8" eyre = "0.6.8"
dirs = "4.0.0" home = "0.5.4"
ethers = { version = "1.0.2", features = [ "abigen" ] } ethers = "1.0.2"
env_logger = "0.9.0" env_logger = "0.9.0"
log = "0.4.17" log = "0.4.17"
tracing-test = "0.2.4" tracing-test = "0.2.3"
criterion = { version = "0.4", features = [ "async_tokio", "plotters" ]} criterion = { version = "0.4", features = [ "async_tokio", "plotters" ]}
plotters = "0.3.4" plotters = "0.3.3"
tempfile = "3.4.0" tempfile = "3.3.0"
hex = "0.4.3" hex = "0.4.3"
[profile.release]
strip = true
opt-level = "z"
lto = true
codegen-units = 1
panic = "abort"
###################################### ######################################
# Examples # Examples
###################################### ######################################
@ -59,8 +47,8 @@ name = "checkpoints"
path = "examples/checkpoints.rs" path = "examples/checkpoints.rs"
[[example]] [[example]]
name = "basic" name = "readme"
path = "examples/basic.rs" path = "examples/readme.rs"
[[example]] [[example]]
name = "client" name = "client"
@ -70,10 +58,6 @@ path = "examples/client.rs"
name = "config" name = "config"
path = "examples/config.rs" path = "examples/config.rs"
[[example]]
name = "call"
path = "examples/call.rs"
###################################### ######################################
# Benchmarks # Benchmarks
###################################### ######################################


@ -6,7 +6,7 @@ Helios is a fully trustless, efficient, and portable Ethereum light client writt
Helios converts an untrusted centralized RPC endpoint into a safe unmanipulable local RPC for its users. It syncs in seconds, requires no storage, and is lightweight enough to run on mobile devices. Helios converts an untrusted centralized RPC endpoint into a safe unmanipulable local RPC for its users. It syncs in seconds, requires no storage, and is lightweight enough to run on mobile devices.
The entire size of Helios's binary is 5.3Mb and should be easy to compile into WebAssembly. This makes it a perfect target to embed directly inside wallets and dapps. The entire size of Helios's binary is 13Mb and should be easy to compile into WebAssembly. This makes it a perfect target to embed directly inside wallets and dapps.
## Installing ## Installing
@ -40,7 +40,7 @@ Helios is still experimental software. While we hope you try it out, we do not s
### Additional Options ### Additional Options
`--consensus-rpc` or `-c` can be used to set a custom consensus layer rpc endpoint. This must be a consensus node that supports the light client beaconchain api. We recommend using Nimbus for this. If no consensus rpc is supplied, it defaults to `https://www.lightclientdata.org` which is run by us. `--consensus-rpc` or `-c` can be used to set a custom consensus layer rpc endpoint. This must be a consenus node that supports the light client beaconchain api. We recommend using Nimbus for this. If no consensus rpc is supplied, it defaults to `https://www.lightclientdata.org` which is run by us.
`--checkpoint` or `-w` can be used to set a custom weak subjectivity checkpoint. This must be equal the first beacon blockhash of an epoch. Weak subjectivity checkpoints are the root of trust in the system. If this is set to a malicious value, an attacker can cause the client to sync to the wrong chain. Helios sets a default value initially, then caches the most recent finalized block it has seen for later use. `--checkpoint` or `-w` can be used to set a custom weak subjectivity checkpoint. This must be equal the first beacon blockhash of an epoch. Weak subjectivity checkpoints are the root of trust in the system. If this is set to a malicious value, an attacker can cause the client to sync to the wrong chain. Helios sets a default value initially, then caches the most recent finalized block it has seen for later use.
@ -58,13 +58,11 @@ For example, you can specify the fallback like so: `helios --fallback "https://s
For example, say you set a checkpoint value that is too outdated and Helios cannot sync to it. For example, say you set a checkpoint value that is too outdated and Helios cannot sync to it.
If this flag is set, Helios will query all network apis in the community-maintained list If this flag is set, Helios will query all network apis in the community-maintained list
at [ethpandaops/checkpoint-synz-health-checks](https://github.com/ethpandaops/checkpoint-sync-health-checks/blob/master/_data/endpoints.yaml) for their latest slots. at [ethpandaops/checkpoint-synz-health-checks](https://github.com/ethpandaops/checkpoint-sync-health-checks/blob/master/_data/endpoints.yaml) for their latest slots.
The list of slots is filtered for healthy apis and the most frequent checkpoint occurring in the latest epoch will be returned. The list of slots is filtered for healthy apis and the most frequent checkpoint occuring in the latest epoch will be returned.
Note: this is a community-maintained list and thus no security guarantees are provided. Use this is a last resort if your checkpoint passed into `--checkpoint` fails. Note: this is a community-maintained list and thus no security guarantees are provided. Use this is a last resort if your checkpoint passed into `--checkpoint` fails.
This is not recommended as malicious checkpoints can be returned from the listed apis, even if they are considered _healthy_. This is not recommened as malicious checkpoints can be returned from the listed apis, even if they are considered _healthy_.
This can be run like so: `helios --load-external-fallback` (or `helios -l` with the shorthand). This can be run like so: `helios --load-external-fallback` (or `helios -l` with the shorthand).
`--strict-checkpoint-age` or `-s` enables strict checkpoint age checking. If the checkpoint is over two weeks old and this flag is enabled, Helios will error. Without this flag, Helios will instead surface a warning to the user and continue. If the checkpoint is greater than two weeks old, there are theoretical attacks that can cause Helios and over light clients to sync incorrectly. These attacks are complex and expensive, so Helios disables this by default.
`--help` or `-h` prints the help message. `--help` or `-h` prints the help message.
### Configuration Files ### Configuration Files
@ -148,52 +146,12 @@ async fn main() -> Result<()> {
} }
``` ```
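For reference, the kind of client-usage program shown in this example can be sketched from the builder and client calls exercised elsewhere in this diff (`ClientBuilder::new`, `.network`, `.build`, `client.start().await`, `get_block_number`). In the sketch below the execution RPC URL is a placeholder, and the `.execution_rpc` setter and the `Client<FileDB>` annotation are assumptions rather than quotes from this diff:

```rust
use eyre::Result;
use helios::client::{Client, ClientBuilder};
use helios::config::networks::Network;
use helios::prelude::FileDB;

#[tokio::main]
async fn main() -> Result<()> {
    // Untrusted execution RPC that Helios verifies locally (placeholder URL).
    let untrusted_rpc = "https://eth-mainnet.example.com/v2/<API_KEY>";

    // Build and start a mainnet client against the untrusted RPC.
    let mut client: Client<FileDB> = ClientBuilder::new()
        .network(Network::MAINNET)
        .execution_rpc(untrusted_rpc)
        .build()?;
    client.start().await?;

    // Query through the locally verified view of the chain.
    let head = client.get_block_number().await?;
    println!("synced head block: {head}");
    Ok(())
}
```

Depending on which side of this diff is compiled, the builder may also need a `.data_dir(...)` set before `.build()` succeeds with `FileDB`.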
## Architecture
```mermaid
graph LR
Client ----> Rpc
Client ----> Node
Node ----> ConsensusClient
Node ----> ExecutionClient
ExecutionClient ----> ExecutionRpc
ConsensusClient ----> ConsensusRpc
Node ----> Evm
Evm ----> ExecutionClient
ExecutionRpc --> UntrustedExecutionRpc
ConsensusRpc --> UntrustedConsensusRpc
classDef node fill:#f9f,stroke:#333,stroke-width:4px, color:black;
class Node,Client node
classDef execution fill:#f0f,stroke:#333,stroke-width:4px;
class ExecutionClient,ExecutionRpc execution
classDef consensus fill:#ff0,stroke:#333,stroke-width:4px;
class ConsensusClient,ConsensusRpc consensus
classDef evm fill:#0ff,stroke:#333,stroke-width:4px;
class Evm evm
classDef providerC fill:#ffc
class UntrustedConsensusRpc providerC
classDef providerE fill:#fbf
class UntrustedExecutionRpc providerE
classDef rpc fill:#e10
class Rpc rpc
subgraph "External Network"
UntrustedExecutionRpc
UntrustedConsensusRpc
end
```
## Benchmarks ## Benchmarks
Benchmarks are defined in the [benches](./benches/) subdirectory. They are built using the [criterion](https://github.com/bheisler/criterion.rs) statistics-driven benchmarking library. Benchmarks are defined in the [benches](./benches/) subdirectory. They are built using the [criterion](https://github.com/bheisler/criterion.rs) statistics-driven benchmarking library.
To run all benchmarks, you can use `cargo bench`. To run a specific benchmark, you can use `cargo bench --bench <name>`, where `<name>` is one of the benchmarks defined in the [Cargo.toml](./Cargo.toml) file under a `[[bench]]` section. To run all benchmarks, you can use `cargo bench`. To run a specific benchmark, you can use `cargo bench --bench <name>`, where `<name>` is one of the benchmarks defined in the [Cargo.toml](./Cargo.toml) file under a `[[bench]]` section.
To learn more about [helios](https://github.com/a16z/helios) benchmarking and to view benchmark flamegraphs, view the [benchmark readme](./benches/README.md).
## Contributing ## Contributing
All contributions to Helios are welcome. Before opening a PR, please submit an issue detailing the bug or feature. When opening a PR, please ensure that your contribution builds on the nightly rust toolchain, has been linted with `cargo fmt`, and contains tests when applicable. All contributions to Helios are welcome. Before opening a PR, please submit an issue detailing the bug or feature. When opening a PR, please ensure that your contribution builds on the nightly rust toolchain, has been linted with `cargo fmt`, and contains tests when applicable.
@ -205,3 +163,4 @@ If you are having trouble with Helios or are considering contributing, feel free
## Disclaimer ## Disclaimer
_This code is being provided as is. No guarantee, representation or warranty is being made, express or implied, as to the safety or correctness of the code. It has not been audited and as such there can be no assurance it will work as intended, and users may experience delays, failures, errors, omissions or loss of transmitted information. Nothing in this repo should be construed as investment advice or legal advice for any particular facts or circumstances and is not meant to replace competent counsel. It is strongly advised for you to contact a reputable attorney in your jurisdiction for any questions or concerns with respect thereto. a16z is not liable for any use of the foregoing, and users should proceed with caution and use at their own risk. See a16z.com/disclosures for more info._ _This code is being provided as is. No guarantee, representation or warranty is being made, express or implied, as to the safety or correctness of the code. It has not been audited and as such there can be no assurance it will work as intended, and users may experience delays, failures, errors, omissions or loss of transmitted information. Nothing in this repo should be construed as investment advice or legal advice for any particular facts or circumstances and is not meant to replace competent counsel. It is strongly advised for you to contact a reputable attorney in your jurisdiction for any questions or concerns with respect thereto. a16z is not liable for any use of the foregoing, and users should proceed with caution and use at their own risk. See a16z.com/disclosures for more info._


@ -1,18 +0,0 @@
# Helios Benchmarking
Helios performance is measured using [criterion](https://github.com/bheisler/criterion.rs) for comprehensive statistics-driven benchmarking.
Benchmarks are defined in the [benches](./) subdirectory and can be run using the cargo `bench` subcommand (eg `cargo bench`). To run a specific benchmark, you can use `cargo bench --bench <name>`, where `<name>` is one of the benchmarks defined in the [Cargo.toml](./Cargo.toml) file under a `[[bench]]` section.
#### Flamegraphs
[Flamegraph](https://github.com/brendangregg/FlameGraph) is a powerful rust crate for generating profile visualizations, that is graphing the time a program spends in each function. Functions called during execution are displayed as horizontal rectangles with the width proportional to the time spent in that function. As the call stack grows (think nested function invocations), the rectangles are stacked vertically. This provides a powerful visualization for quickly understanding which parts of a codebase take up disproportionate amounts of time.
Check out Brendan Gregg's [Flame Graphs](http://www.brendangregg.com/flamegraphs.html) blog post if you're interested in learning more about flamegraphs and performance visualizations in general.
To generate a flamegraph for helios, you can use the `cargo flamegraph` subcommand. For example, to generate a flamegraph for the [`client`](./examples/client.rs) example, you can run:
```bash
cargo flamegraph --example client -o ./flamegraphs/client.svg
```
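For context, a `[[bench]]` target of the kind described above is an ordinary criterion harness registered with `criterion_group!`/`criterion_main!` (and typically declared with `harness = false` in `Cargo.toml`). A minimal sketch, with the `save_checkpoint` name borrowed from the benchmark touched later in this diff and a purely illustrative body:

```rust
use criterion::{criterion_group, criterion_main, Criterion};

// One measurement target: criterion runs the closure passed to `b.iter`
// many times and reports statistics for it.
pub fn save_checkpoint(c: &mut Criterion) {
    c.bench_function("save_checkpoint", |b| {
        let checkpoint = vec![1u8, 2, 3];
        b.iter(|| checkpoint.clone())
    });
}

criterion_group!(benches, save_checkpoint);
criterion_main!(benches);
```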


@ -1,5 +1,4 @@
use client::database::Database; use client::database::Database;
use config::Config;
use criterion::{criterion_group, criterion_main, Criterion}; use criterion::{criterion_group, criterion_main, Criterion};
use helios::prelude::FileDB; use helios::prelude::FileDB;
use tempfile::tempdir; use tempfile::tempdir;
@ -18,12 +17,8 @@ pub fn save_checkpoint(c: &mut Criterion) {
c.bench_function("save_checkpoint", |b| { c.bench_function("save_checkpoint", |b| {
let checkpoint = vec![1, 2, 3]; let checkpoint = vec![1, 2, 3];
b.iter(|| { b.iter(|| {
let data_dir = Some(tempdir().unwrap().into_path()); let temp_dir = tempdir().unwrap().into_path();
let config = Config { let db = FileDB::new(temp_dir);
data_dir,
..Default::default()
};
let db = FileDB::new(&config).unwrap();
db.save_checkpoint(checkpoint.clone()).unwrap(); db.save_checkpoint(checkpoint.clone()).unwrap();
}) })
}); });
@ -33,17 +28,14 @@ pub fn save_checkpoint(c: &mut Criterion) {
pub fn load_checkpoint(c: &mut Criterion) { pub fn load_checkpoint(c: &mut Criterion) {
c.bench_function("load_checkpoint", |b| { c.bench_function("load_checkpoint", |b| {
// First write to the db // First write to the db
let data_dir = Some(tempdir().unwrap().into_path()); let temp_dir = tempdir().unwrap().into_path();
let config = Config { let db = FileDB::new(temp_dir.clone());
data_dir, let written_checkpoint = vec![1, 2, 3];
..Default::default()
};
let db = FileDB::new(&config).unwrap();
let written_checkpoint = vec![1; 32];
db.save_checkpoint(written_checkpoint.clone()).unwrap(); db.save_checkpoint(written_checkpoint.clone()).unwrap();
// Then read from the db // Then read from the db
b.iter(|| { b.iter(|| {
let db = FileDB::new(temp_dir.clone());
let checkpoint = db.load_checkpoint().unwrap(); let checkpoint = db.load_checkpoint().unwrap();
assert_eq!(checkpoint, written_checkpoint.clone()); assert_eq!(checkpoint, written_checkpoint.clone());
}) })

File diff suppressed because one or more lines are too long

Image file (27 KiB), preview not included.


@ -48,14 +48,7 @@ pub fn bench_goerli_get_balance(c: &mut Criterion) {
.unwrap(); .unwrap();
// Construct a goerli client using our harness and tokio runtime. // Construct a goerli client using our harness and tokio runtime.
let gc = match harness::construct_goerli_client(&rt) { let client = std::sync::Arc::new(harness::construct_goerli_client(&rt).unwrap());
Ok(gc) => gc,
Err(e) => {
println!("failed to construct goerli client: {}", e);
std::process::exit(1);
}
};
let client = std::sync::Arc::new(gc);
// Get the beacon chain deposit contract address. // Get the beacon chain deposit contract address.
let addr = Address::from_str("0xB4FBF271143F4FBf7B91A5ded31805e42b2208d6").unwrap(); let addr = Address::from_str("0xB4FBF271143F4FBf7B91A5ded31805e42b2208d6").unwrap();


@ -2,12 +2,12 @@
use std::{str::FromStr, sync::Arc}; use std::{str::FromStr, sync::Arc};
use ::client::{database::ConfigDB, Client}; use ::client::Client;
use ethers::{ use ethers::{
abi::Address, abi::Address,
types::{H256, U256}, types::{H256, U256},
}; };
use helios::{client, config::networks, types::BlockTag}; use helios::{client, config::networks, prelude::FileDB, types::BlockTag};
/// Fetches the latest mainnet checkpoint from the fallback service. /// Fetches the latest mainnet checkpoint from the fallback service.
/// ///
@ -29,11 +29,11 @@ pub async fn fetch_mainnet_checkpoint() -> eyre::Result<H256> {
/// The client will use `https://www.lightclientdata.org` as the consensus RPC. /// The client will use `https://www.lightclientdata.org` as the consensus RPC.
pub fn construct_mainnet_client( pub fn construct_mainnet_client(
rt: &tokio::runtime::Runtime, rt: &tokio::runtime::Runtime,
) -> eyre::Result<client::Client<ConfigDB>> { ) -> eyre::Result<client::Client<client::FileDB>> {
rt.block_on(inner_construct_mainnet_client()) rt.block_on(inner_construct_mainnet_client())
} }
pub async fn inner_construct_mainnet_client() -> eyre::Result<client::Client<ConfigDB>> { pub async fn inner_construct_mainnet_client() -> eyre::Result<client::Client<client::FileDB>> {
let benchmark_rpc_url = std::env::var("MAINNET_RPC_URL")?; let benchmark_rpc_url = std::env::var("MAINNET_RPC_URL")?;
let mut client = client::ClientBuilder::new() let mut client = client::ClientBuilder::new()
.network(networks::Network::MAINNET) .network(networks::Network::MAINNET)
@ -47,7 +47,7 @@ pub async fn inner_construct_mainnet_client() -> eyre::Result<client::Client<Con
pub async fn construct_mainnet_client_with_checkpoint( pub async fn construct_mainnet_client_with_checkpoint(
checkpoint: &str, checkpoint: &str,
) -> eyre::Result<client::Client<ConfigDB>> { ) -> eyre::Result<client::Client<client::FileDB>> {
let benchmark_rpc_url = std::env::var("MAINNET_RPC_URL")?; let benchmark_rpc_url = std::env::var("MAINNET_RPC_URL")?;
let mut client = client::ClientBuilder::new() let mut client = client::ClientBuilder::new()
.network(networks::Network::MAINNET) .network(networks::Network::MAINNET)
@ -79,7 +79,7 @@ pub fn construct_runtime() -> tokio::runtime::Runtime {
/// The client will use `http://testing.prater.beacon-api.nimbus.team` as the consensus RPC. /// The client will use `http://testing.prater.beacon-api.nimbus.team` as the consensus RPC.
pub fn construct_goerli_client( pub fn construct_goerli_client(
rt: &tokio::runtime::Runtime, rt: &tokio::runtime::Runtime,
) -> eyre::Result<client::Client<ConfigDB>> { ) -> eyre::Result<client::Client<client::FileDB>> {
rt.block_on(async { rt.block_on(async {
let benchmark_rpc_url = std::env::var("GOERLI_RPC_URL")?; let benchmark_rpc_url = std::env::var("GOERLI_RPC_URL")?;
let mut client = client::ClientBuilder::new() let mut client = client::ClientBuilder::new()
@ -96,7 +96,7 @@ pub fn construct_goerli_client(
/// Gets the balance of the given address on mainnet. /// Gets the balance of the given address on mainnet.
pub fn get_balance( pub fn get_balance(
rt: &tokio::runtime::Runtime, rt: &tokio::runtime::Runtime,
client: Arc<Client<ConfigDB>>, client: Arc<Client<FileDB>>,
address: &str, address: &str,
) -> eyre::Result<U256> { ) -> eyre::Result<U256> {
rt.block_on(async { rt.block_on(async {


@ -2,7 +2,7 @@ cargo-features = ["different-binary-name"]
[package] [package]
name = "cli" name = "cli"
version = "0.3.0" version = "0.1.0"
edition = "2021" edition = "2021"
[[bin]] [[bin]]


@ -1,4 +1,5 @@
use std::{ use std::{
fs,
path::PathBuf, path::PathBuf,
process::exit, process::exit,
str::FromStr, str::FromStr,
@ -14,25 +15,16 @@ use eyre::Result;
use client::{database::FileDB, Client, ClientBuilder}; use client::{database::FileDB, Client, ClientBuilder};
use config::{CliConfig, Config}; use config::{CliConfig, Config};
use futures::executor::block_on; use futures::executor::block_on;
use log::{error, info}; use log::info;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
let config = get_config(); let config = get_config();
let mut client = match ClientBuilder::new().config(config).build() { let mut client = ClientBuilder::new().config(config).build()?;
Ok(client) => client,
Err(err) => {
error!("{}", err);
exit(1);
}
};
if let Err(err) = client.start().await { client.start().await?;
error!("{}", err);
exit(1);
}
register_shutdown_handler(client); register_shutdown_handler(client);
std::future::pending().await std::future::pending().await
@ -80,8 +72,6 @@ fn get_config() -> Config {
} }
#[derive(Parser)] #[derive(Parser)]
#[clap(version, about)]
/// Helios is a fast, secure, and portable light client for Ethereum
struct Cli { struct Cli {
#[clap(short, long, default_value = "mainnet")] #[clap(short, long, default_value = "mainnet")]
network: String, network: String,
@ -99,16 +89,14 @@ struct Cli {
fallback: Option<String>, fallback: Option<String>,
#[clap(short = 'l', long, env)] #[clap(short = 'l', long, env)]
load_external_fallback: bool, load_external_fallback: bool,
#[clap(short = 's', long, env)]
strict_checkpoint_age: bool,
} }
impl Cli { impl Cli {
fn as_cli_config(&self) -> CliConfig { fn as_cli_config(&self) -> CliConfig {
let checkpoint = self let checkpoint = match &self.checkpoint {
.checkpoint Some(checkpoint) => Some(hex_str_to_bytes(checkpoint).expect("invalid checkpoint")),
.as_ref() None => self.get_cached_checkpoint(),
.map(|c| hex_str_to_bytes(c).expect("invalid checkpoint")); };
CliConfig { CliConfig {
checkpoint, checkpoint,
@ -118,7 +106,21 @@ impl Cli {
rpc_port: self.rpc_port, rpc_port: self.rpc_port,
fallback: self.fallback.clone(), fallback: self.fallback.clone(),
load_external_fallback: self.load_external_fallback, load_external_fallback: self.load_external_fallback,
strict_checkpoint_age: self.strict_checkpoint_age, }
}
fn get_cached_checkpoint(&self) -> Option<Vec<u8>> {
let data_dir = self.get_data_dir();
let checkpoint_file = data_dir.join("checkpoint");
if checkpoint_file.exists() {
let checkpoint_res = fs::read(checkpoint_file);
match checkpoint_res {
Ok(checkpoint) => Some(checkpoint),
Err(_) => None,
}
} else {
None
} }
} }


@ -1,29 +1,24 @@
[package] [package]
name = "client" name = "client"
version = "0.3.0" version = "0.1.1"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
tokio = { version = "1", features = ["full"] }
eyre = "0.6.8" eyre = "0.6.8"
serde = { version = "1.0.143", features = ["derive"] } serde = { version = "1.0.143", features = ["derive"] }
hex = "0.4.3" hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "d09f55b4f8554491e3431e01af1c32347a8781cd" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f18ca919cc1b685b861d0fa9e2daabe89737" }
ethers = "1.0.0" ethers = "1.0.2"
jsonrpsee = { version = "0.15.1", features = ["full"] }
futures = "0.3.23" futures = "0.3.23"
log = "0.4.17" log = "0.4.17"
thiserror = "1.0.37" thiserror = "1.0.37"
discv5 = { version = "0.1.0" }
libp2p = { version = "0.50.0", features = ["full"] }
common = { path = "../common" } common = { path = "../common" }
consensus = { path = "../consensus" } consensus = { path = "../consensus" }
execution = { path = "../execution" } execution = { path = "../execution" }
config = { path = "../config" } config = { path = "../config" }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
jsonrpsee = { version = "0.15.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-timers = "0.2.6"
wasm-bindgen-futures = "0.4.33"
tokio = { version = "1", features = ["sync"] }


@ -1,37 +1,22 @@
use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use config::networks::Network; use config::networks::Network;
use consensus::errors::ConsensusError;
use ethers::prelude::{Address, U256}; use ethers::prelude::{Address, U256};
use ethers::types::{ use ethers::types::{Filter, Log, Transaction, TransactionReceipt, H256};
FeeHistory, Filter, Log, SyncingStatus, Transaction, TransactionReceipt, H256,
};
use eyre::{eyre, Result}; use eyre::{eyre, Result};
use common::types::BlockTag; use common::types::BlockTag;
use config::{CheckpointFallback, Config}; use config::{CheckpointFallback, Config};
use consensus::{types::Header, ConsensusClient}; use consensus::{types::Header, ConsensusClient};
use execution::types::{CallOpts, ExecutionBlock}; use execution::types::{CallOpts, ExecutionBlock};
use log::{error, info, warn}; use log::{info, warn};
use tokio::sync::RwLock;
#[cfg(not(target_arch = "wasm32"))]
use std::path::PathBuf;
#[cfg(not(target_arch = "wasm32"))]
use tokio::spawn; use tokio::spawn;
#[cfg(not(target_arch = "wasm32"))] use tokio::sync::RwLock;
use tokio::time::sleep; use tokio::time::sleep;
#[cfg(target_arch = "wasm32")] use crate::database::{Database, FileDB};
use gloo_timers::callback::Interval;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::spawn_local;
use crate::database::Database;
use crate::errors::NodeError;
use crate::node::Node; use crate::node::Node;
#[cfg(not(target_arch = "wasm32"))]
use crate::rpc::Rpc; use crate::rpc::Rpc;
#[derive(Default)] #[derive(Default)]
@ -40,14 +25,11 @@ pub struct ClientBuilder {
consensus_rpc: Option<String>, consensus_rpc: Option<String>,
execution_rpc: Option<String>, execution_rpc: Option<String>,
checkpoint: Option<Vec<u8>>, checkpoint: Option<Vec<u8>>,
#[cfg(not(target_arch = "wasm32"))]
rpc_port: Option<u16>, rpc_port: Option<u16>,
#[cfg(not(target_arch = "wasm32"))]
data_dir: Option<PathBuf>, data_dir: Option<PathBuf>,
config: Option<Config>, config: Option<Config>,
fallback: Option<String>, fallback: Option<String>,
load_external_fallback: bool, load_external_fallback: bool,
strict_checkpoint_age: bool,
} }
impl ClientBuilder { impl ClientBuilder {
@ -77,13 +59,11 @@ impl ClientBuilder {
self self
} }
#[cfg(not(target_arch = "wasm32"))]
pub fn rpc_port(mut self, port: u16) -> Self { pub fn rpc_port(mut self, port: u16) -> Self {
self.rpc_port = Some(port); self.rpc_port = Some(port);
self self
} }
#[cfg(not(target_arch = "wasm32"))]
pub fn data_dir(mut self, data_dir: PathBuf) -> Self { pub fn data_dir(mut self, data_dir: PathBuf) -> Self {
self.data_dir = Some(data_dir); self.data_dir = Some(data_dir);
self self
@ -104,12 +84,7 @@ impl ClientBuilder {
self self
} }
pub fn strict_checkpoint_age(mut self) -> Self { pub fn build(self) -> Result<Client<FileDB>> {
self.strict_checkpoint_age = true;
self
}
pub fn build<DB: Database>(self) -> Result<Client<DB>> {
let base_config = if let Some(network) = self.network { let base_config = if let Some(network) = self.network {
network.to_base_config() network.to_base_config()
} else { } else {
@ -137,20 +112,13 @@ impl ClientBuilder {
}); });
let checkpoint = if let Some(checkpoint) = self.checkpoint { let checkpoint = if let Some(checkpoint) = self.checkpoint {
Some(checkpoint) checkpoint
} else if let Some(config) = &self.config { } else if let Some(config) = &self.config {
config.checkpoint.clone() config.checkpoint.clone()
} else { } else {
None base_config.checkpoint
}; };
let default_checkpoint = if let Some(config) = &self.config {
config.default_checkpoint.clone()
} else {
base_config.default_checkpoint.clone()
};
#[cfg(not(target_arch = "wasm32"))]
let rpc_port = if self.rpc_port.is_some() { let rpc_port = if self.rpc_port.is_some() {
self.rpc_port self.rpc_port
} else if let Some(config) = &self.config { } else if let Some(config) = &self.config {
@ -159,7 +127,6 @@ impl ClientBuilder {
None None
}; };
#[cfg(not(target_arch = "wasm32"))]
let data_dir = if self.data_dir.is_some() { let data_dir = if self.data_dir.is_some() {
self.data_dir self.data_dir
} else if let Some(config) = &self.config { } else if let Some(config) = &self.config {
@ -182,31 +149,17 @@ impl ClientBuilder {
self.load_external_fallback self.load_external_fallback
}; };
let strict_checkpoint_age = if let Some(config) = &self.config {
self.strict_checkpoint_age || config.strict_checkpoint_age
} else {
self.strict_checkpoint_age
};
let config = Config { let config = Config {
consensus_rpc, consensus_rpc,
execution_rpc, execution_rpc,
checkpoint, checkpoint,
default_checkpoint,
#[cfg(not(target_arch = "wasm32"))]
rpc_port, rpc_port,
#[cfg(target_arch = "wasm32")]
rpc_port: None,
#[cfg(not(target_arch = "wasm32"))]
data_dir, data_dir,
#[cfg(target_arch = "wasm32")]
data_dir: None,
chain: base_config.chain, chain: base_config.chain,
forks: base_config.forks, forks: base_config.forks,
max_checkpoint_age: base_config.max_checkpoint_age, max_checkpoint_age: base_config.max_checkpoint_age,
fallback, fallback,
load_external_fallback, load_external_fallback,
strict_checkpoint_age,
}; };
Client::new(config) Client::new(config)
@ -215,84 +168,52 @@ impl ClientBuilder {
pub struct Client<DB: Database> { pub struct Client<DB: Database> {
node: Arc<RwLock<Node>>, node: Arc<RwLock<Node>>,
#[cfg(not(target_arch = "wasm32"))]
rpc: Option<Rpc>, rpc: Option<Rpc>,
db: DB, db: Option<DB>,
fallback: Option<String>, fallback: Option<String>,
load_external_fallback: bool, load_external_fallback: bool,
} }
impl<DB: Database> Client<DB> { impl Client<FileDB> {
fn new(mut config: Config) -> Result<Self> { fn new(config: Config) -> Result<Self> {
let db = DB::new(&config)?;
if config.checkpoint.is_none() {
let checkpoint = db.load_checkpoint()?;
config.checkpoint = Some(checkpoint);
}
let config = Arc::new(config); let config = Arc::new(config);
let node = Node::new(config.clone())?; let node = Node::new(config.clone())?;
let node = Arc::new(RwLock::new(node)); let node = Arc::new(RwLock::new(node));
#[cfg(not(target_arch = "wasm32"))]
let rpc = config.rpc_port.map(|port| Rpc::new(node.clone(), port)); let rpc = config.rpc_port.map(|port| Rpc::new(node.clone(), port));
let data_dir = config.data_dir.clone();
let db = data_dir.map(FileDB::new);
Ok(Client { Ok(Client {
node, node,
#[cfg(not(target_arch = "wasm32"))]
rpc, rpc,
db, db,
fallback: config.fallback.clone(), fallback: config.fallback.clone(),
load_external_fallback: config.load_external_fallback, load_external_fallback: config.load_external_fallback,
}) })
} }
}
impl<DB: Database> Client<DB> {
pub async fn start(&mut self) -> Result<()> { pub async fn start(&mut self) -> Result<()> {
#[cfg(not(target_arch = "wasm32"))]
if let Some(rpc) = &mut self.rpc { if let Some(rpc) = &mut self.rpc {
rpc.start().await?; rpc.start().await?;
} }
let sync_res = self.node.write().await.sync().await; if self.node.write().await.sync().await.is_err() {
warn!(
if let Err(err) = sync_res { "failed to sync consensus node with checkpoint: 0x{}",
match err { hex::encode(&self.node.read().await.config.checkpoint),
NodeError::ConsensusSyncError(err) => match err.downcast_ref() { );
Some(ConsensusError::CheckpointTooOld) => { let fallback = self.boot_from_fallback().await;
warn!( if fallback.is_err() && self.load_external_fallback {
"failed to sync consensus node with checkpoint: 0x{}", self.boot_from_external_fallbacks().await?
hex::encode( } else if fallback.is_err() {
self.node return Err(eyre::eyre!("Checkpoint is too old. Please update your checkpoint. Alternatively, set an explicit checkpoint fallback service url with the `-f` flag or use the configured external fallback services with `-l` (NOT RECOMMENED). See https://github.com/a16z/helios#additional-options for more information."));
.read()
.await
.config
.checkpoint
.clone()
.unwrap_or_default()
),
);
let fallback = self.boot_from_fallback().await;
if fallback.is_err() && self.load_external_fallback {
self.boot_from_external_fallbacks().await?
} else if fallback.is_err() {
error!("Invalid checkpoint. Please update your checkpoint too a more recent block. Alternatively, set an explicit checkpoint fallback service url with the `-f` flag or use the configured external fallback services with `-l` (NOT RECOMMENDED). See https://github.com/a16z/helios#additional-options for more information.");
return Err(err);
}
}
_ => return Err(err),
},
_ => return Err(err.into()),
} }
} }
self.start_advance_thread();
Ok(())
}
#[cfg(not(target_arch = "wasm32"))]
fn start_advance_thread(&self) {
let node = self.node.clone(); let node = self.node.clone();
spawn(async move { spawn(async move {
loop { loop {
@ -302,25 +223,11 @@ impl<DB: Database> Client<DB> {
} }
let next_update = node.read().await.duration_until_next_update(); let next_update = node.read().await.duration_until_next_update();
sleep(next_update).await; sleep(next_update).await;
} }
}); });
}
#[cfg(target_arch = "wasm32")] Ok(())
fn start_advance_thread(&self) {
let node = self.node.clone();
Interval::new(12000, move || {
let node = node.clone();
spawn_local(async move {
let res = node.write().await.advance().await;
if let Err(err) = res {
warn!("consensus error: {}", err);
}
});
})
.forget();
} }
async fn boot_from_fallback(&self) -> eyre::Result<()> { async fn boot_from_fallback(&self) -> eyre::Result<()> {
@ -401,8 +308,8 @@ impl<DB: Database> Client<DB> {
}; };
info!("saving last checkpoint hash"); info!("saving last checkpoint hash");
let res = self.db.save_checkpoint(checkpoint); let res = self.db.as_ref().map(|db| db.save_checkpoint(checkpoint));
if res.is_err() { if res.is_some() && res.unwrap().is_err() {
warn!("checkpoint save failed"); warn!("checkpoint save failed");
} }
} }
@ -503,19 +410,6 @@ impl<DB: Database> Client<DB> {
self.node.read().await.get_block_number() self.node.read().await.get_block_number()
} }
pub async fn get_fee_history(
&self,
block_count: u64,
last_block: u64,
reward_percentiles: &[f64],
) -> Result<Option<FeeHistory>> {
self.node
.read()
.await
.get_fee_history(block_count, last_block, reward_percentiles)
.await
}
pub async fn get_block_by_number( pub async fn get_block_by_number(
&self, &self,
block: BlockTag, block: BlockTag,
@ -540,31 +434,11 @@ impl<DB: Database> Client<DB> {
.await .await
} }
pub async fn get_transaction_by_block_hash_and_index(
&self,
block_hash: &Vec<u8>,
index: usize,
) -> Result<Option<Transaction>> {
self.node
.read()
.await
.get_transaction_by_block_hash_and_index(block_hash, index)
.await
}
pub async fn chain_id(&self) -> u64 { pub async fn chain_id(&self) -> u64 {
self.node.read().await.chain_id() self.node.read().await.chain_id()
} }
pub async fn syncing(&self) -> Result<SyncingStatus> {
self.node.read().await.syncing()
}
pub async fn get_header(&self) -> Result<Header> { pub async fn get_header(&self) -> Result<Header> {
self.node.read().await.get_header() self.node.read().await.get_header()
} }
pub async fn get_coinbase(&self) -> Result<Address> {
self.node.read().await.get_coinbase()
}
} }


@ -1,40 +1,27 @@
#[cfg(not(target_arch = "wasm32"))]
use std::{ use std::{
fs, fs,
io::{Read, Write}, io::{Read, Write},
path::PathBuf, path::PathBuf,
}; };
use config::Config;
use eyre::Result; use eyre::Result;
pub trait Database { pub trait Database {
fn new(config: &Config) -> Result<Self>
where
Self: Sized;
fn save_checkpoint(&self, checkpoint: Vec<u8>) -> Result<()>; fn save_checkpoint(&self, checkpoint: Vec<u8>) -> Result<()>;
fn load_checkpoint(&self) -> Result<Vec<u8>>; fn load_checkpoint(&self) -> Result<Vec<u8>>;
} }
#[cfg(not(target_arch = "wasm32"))]
pub struct FileDB { pub struct FileDB {
data_dir: PathBuf, data_dir: PathBuf,
default_checkpoint: Vec<u8>,
} }
#[cfg(not(target_arch = "wasm32"))] impl FileDB {
impl Database for FileDB { pub fn new(data_dir: PathBuf) -> Self {
fn new(config: &Config) -> Result<Self> { FileDB { data_dir }
if let Some(data_dir) = &config.data_dir {
return Ok(FileDB {
data_dir: data_dir.to_path_buf(),
default_checkpoint: config.default_checkpoint.clone(),
});
}
eyre::bail!("data dir not in config")
} }
}
impl Database for FileDB {
fn save_checkpoint(&self, checkpoint: Vec<u8>) -> Result<()> { fn save_checkpoint(&self, checkpoint: Vec<u8>) -> Result<()> {
fs::create_dir_all(&self.data_dir)?; fs::create_dir_all(&self.data_dir)?;
@ -50,40 +37,13 @@ impl Database for FileDB {
} }
fn load_checkpoint(&self) -> Result<Vec<u8>> { fn load_checkpoint(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new(); let mut f = fs::OpenOptions::new()
let res = fs::OpenOptions::new()
.read(true) .read(true)
.open(self.data_dir.join("checkpoint")) .open(self.data_dir.join("checkpoint"))?;
.map(|mut f| f.read_to_end(&mut buf));
if buf.len() == 32 && res.is_ok() { let mut buf = Vec::new();
Ok(buf) f.read_to_end(&mut buf)?;
} else {
Ok(self.default_checkpoint.clone()) Ok(buf)
}
}
}
pub struct ConfigDB {
checkpoint: Vec<u8>,
}
impl Database for ConfigDB {
fn new(config: &Config) -> Result<Self> {
Ok(Self {
checkpoint: config
.checkpoint
.clone()
.unwrap_or(config.default_checkpoint.clone()),
})
}
fn load_checkpoint(&self) -> Result<Vec<u8>> {
Ok(self.checkpoint.clone())
}
fn save_checkpoint(&self, _checkpoint: Vec<u8>) -> Result<()> {
Ok(())
} }
} }


@ -7,10 +7,7 @@ use thiserror::Error;
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum NodeError { pub enum NodeError {
#[error(transparent)] #[error(transparent)]
ExecutionEvmError(#[from] EvmError), ExecutionError(#[from] EvmError),
#[error("execution error: {0}")]
ExecutionError(Report),
#[error("out of sync: {0} slots behind")] #[error("out of sync: {0} slots behind")]
OutOfSync(u64), OutOfSync(u64),
@ -33,15 +30,17 @@ pub enum NodeError {
#[error("consensus sync error: {0}")] #[error("consensus sync error: {0}")]
ConsensusSyncError(Report), ConsensusSyncError(Report),
#[error("p2p error: {0}")]
P2PError(Report),
#[error(transparent)] #[error(transparent)]
BlockNotFoundError(#[from] BlockNotFoundError), BlockNotFoundError(#[from] BlockNotFoundError),
} }
#[cfg(not(target_arch = "wasm32"))]
impl NodeError { impl NodeError {
pub fn to_json_rpsee_error(self) -> jsonrpsee::core::Error { pub fn to_json_rpsee_error(self) -> jsonrpsee::core::Error {
match self { match self {
NodeError::ExecutionEvmError(evm_err) => match evm_err { NodeError::ExecutionError(evm_err) => match evm_err {
EvmError::Revert(data) => { EvmError::Revert(data) => {
let mut msg = "execution reverted".to_string(); let mut msg = "execution reverted".to_string();
if let Some(reason) = data.as_ref().and_then(EvmError::decode_revert_reason) { if let Some(reason) = data.as_ref().and_then(EvmError::decode_revert_reason) {


@ -3,8 +3,6 @@ pub use crate::client::*;
pub mod database; pub mod database;
pub mod errors; pub mod errors;
#[cfg(not(target_arch = "wasm32"))]
pub mod rpc; pub mod rpc;
pub mod node; mod node;


@ -3,39 +3,42 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use ethers::prelude::{Address, U256}; use ethers::prelude::{Address, U256};
use ethers::types::{ use ethers::types::{Filter, Log, Transaction, TransactionReceipt, H256};
FeeHistory, Filter, Log, SyncProgress, SyncingStatus, Transaction, TransactionReceipt, H256,
};
use eyre::{eyre, Result}; use eyre::{eyre, Result};
use common::errors::BlockNotFoundError; use common::errors::BlockNotFoundError;
use common::types::BlockTag; use common::types::BlockTag;
use config::Config; use config::Config;
use consensus::rpc::nimbus_rpc::NimbusRpc; use consensus::rpc::nimbus_rpc::NimbusRpc;
use consensus::types::{ExecutionPayload, Header}; use consensus::types::{ExecutionPayload, Header};
use consensus::ConsensusClient; use consensus::ConsensusClient;
use discv5::{enr, Discv5, Discv5ConfigBuilder, Discv5Event};
use execution::evm::Evm; use execution::evm::Evm;
use execution::rpc::http_rpc::HttpRpc; use execution::rpc::http_rpc::HttpRpc;
use execution::types::{CallOpts, ExecutionBlock}; use execution::types::{CallOpts, ExecutionBlock};
use execution::ExecutionClient; use execution::ExecutionClient;
use futures::StreamExt;
use libp2p::swarm::SwarmEvent;
use libp2p::{identity, ping, Multiaddr, PeerId, Swarm};
use std::net::SocketAddr;
use crate::errors::NodeError; use crate::errors::NodeError;
pub struct Node { pub struct Node {
key: identity::Keypair,
pub consensus: ConsensusClient<NimbusRpc>, pub consensus: ConsensusClient<NimbusRpc>,
pub execution: Arc<ExecutionClient<HttpRpc>>, pub execution: Arc<ExecutionClient<HttpRpc>>,
pub config: Arc<Config>, pub config: Arc<Config>,
payloads: BTreeMap<u64, ExecutionPayload>, payloads: BTreeMap<u64, ExecutionPayload>,
finalized_payloads: BTreeMap<u64, ExecutionPayload>, finalized_payloads: BTreeMap<u64, ExecutionPayload>,
current_slot: Option<u64>,
pub history_size: usize, pub history_size: usize,
disc: Option<Arc<Discv5>>,
} }
impl Node { impl Node {
pub fn new(config: Arc<Config>) -> Result<Self, NodeError> { pub fn new(config: Arc<Config>) -> Result<Self, NodeError> {
let consensus_rpc = &config.consensus_rpc; let consensus_rpc = &config.consensus_rpc;
let checkpoint_hash = &config.checkpoint.as_ref().unwrap(); let checkpoint_hash = &config.checkpoint;
let execution_rpc = &config.execution_rpc; let execution_rpc = &config.execution_rpc;
let consensus = ConsensusClient::new(consensus_rpc, checkpoint_hash, config.clone()) let consensus = ConsensusClient::new(consensus_rpc, checkpoint_hash, config.clone())
@ -47,34 +50,144 @@ impl Node {
let payloads = BTreeMap::new(); let payloads = BTreeMap::new();
let finalized_payloads = BTreeMap::new(); let finalized_payloads = BTreeMap::new();
let key = identity::Keypair::generate_ed25519();
Ok(Node { Ok(Node {
key,
consensus, consensus,
execution, execution,
config, config,
payloads, payloads,
finalized_payloads, finalized_payloads,
current_slot: None,
history_size: 64, history_size: 64,
disc: None,
}) })
} }
#[cfg(feature = "p2p")]
/// Syncs
pub async fn sync(&mut self) -> Result<(), NodeError> { pub async fn sync(&mut self) -> Result<(), NodeError> {
let chain_id = self.config.chain.chain_id; self.start_p2p().await;
self.execution
.check_rpc(chain_id)
.await
.map_err(NodeError::ExecutionError)?;
self.consensus
.check_rpc()
.await
.map_err(NodeError::ConsensusSyncError)?;
self.consensus self.consensus
.sync() .sync()
.await .await
.map_err(NodeError::ConsensusSyncError)?; .map_err(NodeError::ConsensusSyncError)?;
self.update_payloads().await
}
/// Starts the p2p discovery server
pub async fn start_p2p(&mut self) -> Result<(), NodeError> {
// listening address and port
let listen_addr = "0.0.0.0:9000"
.parse::<SocketAddr>()
.map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
// construct a local ENR
let enr_key = common::utils::from_libp2p(&self.key)
.map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
let enr = enr::EnrBuilder::new("v4")
.build(&enr_key)
.map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
// default configuration
let config = Discv5ConfigBuilder::new().build();
// construct the discv5 server
let discv5 =
Discv5::new(enr, enr_key, config).map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
// Set the server
let server = Arc::new(discv5);
self.disc = Some(Arc::clone(&server));
// Start
let mut cloned = Arc::clone(&server);
tokio::spawn(async move {
if let Some(serv) = Arc::get_mut(&mut cloned) {
if let Err(e) = serv.start(listen_addr).await {
log::warn!("Failed to start p2p discovery server. Error: {:?}", e);
}
let mut event_stream = serv.event_stream().await.unwrap();
loop {
match event_stream.recv().await {
Some(Discv5Event::SocketUpdated(addr)) => {
println!("Nodes ENR socket address has been updated to: {addr:?}");
}
Some(Discv5Event::Discovered(enr)) => {
println!("A peer has been discovered: {}", enr.node_id());
}
_ => {}
}
}
} else {
log::warn!("Failed to get mutable reference to the discv5 p2p discovery server inside client node.");
}
});
Ok(())
}
/// Swarm all connected peers on the discovery server
pub async fn p2p_connect(&self) -> Result<(), NodeError> {
// Transform the local keypair to a CombinedKey
let local_key = &self.key;
let local_peer_id = PeerId::from(local_key.public());
log::info!("Local peer id: {:?}", local_peer_id);
let transport = libp2p::development_transport(local_key.clone())
.await
.map_err(|_| NodeError::P2PError(eyre::eyre!("Failed to create libp2p transport")))?;
log::debug!("Created libp2p transport");
// Create a ping network behaviour.
//
// For illustrative purposes, the ping protocol is configured to
// keep the connection alive, so a continuous sequence of pings
// can be observed.
let behaviour = ping::Behaviour::new(ping::Config::new());
let mut swarm = Swarm::with_threadpool_executor(transport, behaviour, local_peer_id);
log::debug!("Created libp2p swarm");
// Tell the swarm to listen on all interfaces and a random, OS-assigned
// port.
let addr = "/ip4/0.0.0.0/tcp/0"
.parse()
.map_err(|_| NodeError::P2PError(eyre::eyre!("Failed to parse Multiaddr string.")))?;
log::debug!("Swarm listening on {addr:?}");
swarm
.listen_on(addr)
.map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
// Dial the peer identified by the multi-address given as the second
// command-line argument, if any.
if let Some(addr) = std::env::args().nth(1) {
let remote: Multiaddr = addr.parse().map_err(|_| {
NodeError::P2PError(eyre::eyre!("Failed to parse Multiaddr string."))
})?;
swarm
.dial(remote)
.map_err(|e| NodeError::P2PError(eyre::eyre!(e)))?;
log::info!("Dialed {}", addr)
}
loop {
match swarm.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => {
log::info!("Listening on {address:?}")
}
SwarmEvent::Behaviour(event) => log::info!("{event:?}"),
_ => {}
}
}
}
#[cfg(not(feature = "p2p"))]
pub async fn sync(&mut self) -> Result<(), NodeError> {
self.consensus
.sync()
.await
.map_err(NodeError::ConsensusSyncError)?;
self.update_payloads().await self.update_payloads().await
} }
@ -109,32 +222,18 @@ impl Node {
.map_err(NodeError::ConsensusPayloadError)?; .map_err(NodeError::ConsensusPayloadError)?;
self.payloads self.payloads
.insert(*latest_payload.block_number(), latest_payload); .insert(latest_payload.block_number, latest_payload);
self.payloads self.payloads
.insert(*finalized_payload.block_number(), finalized_payload.clone()); .insert(finalized_payload.block_number, finalized_payload.clone());
self.finalized_payloads self.finalized_payloads
.insert(*finalized_payload.block_number(), finalized_payload); .insert(finalized_payload.block_number, finalized_payload);
let start_slot = self
.current_slot
.unwrap_or(latest_header.slot - self.history_size as u64);
let backfill_payloads = self
.consensus
.get_payloads(start_slot, latest_header.slot)
.await
.map_err(NodeError::ConsensusPayloadError)?;
for payload in backfill_payloads {
self.payloads.insert(*payload.block_number(), payload);
}
self.current_slot = Some(latest_header.slot);
while self.payloads.len() > self.history_size { while self.payloads.len() > self.history_size {
self.payloads.pop_first(); self.payloads.pop_first();
} }
// only save one finalized block per epoch // only save one finalized block per epoch
// finality updates only occur on epoch boundaries // finality updates only occur on epoch boundries
while self.finalized_payloads.len() > usize::max(self.history_size / 32, 1) { while self.finalized_payloads.len() > usize::max(self.history_size / 32, 1) {
self.finalized_payloads.pop_first(); self.finalized_payloads.pop_first();
} }
@ -152,7 +251,7 @@ impl Node {
&self.payloads, &self.payloads,
self.chain_id(), self.chain_id(),
); );
evm.call(opts).await.map_err(NodeError::ExecutionEvmError) evm.call(opts).await.map_err(NodeError::ExecutionError)
} }
pub async fn estimate_gas(&self, opts: &CallOpts) -> Result<u64, NodeError> { pub async fn estimate_gas(&self, opts: &CallOpts) -> Result<u64, NodeError> {
@ -167,7 +266,7 @@ impl Node {
); );
evm.estimate_gas(opts) evm.estimate_gas(opts)
.await .await
.map_err(NodeError::ExecutionEvmError) .map_err(NodeError::ExecutionError)
} }
pub async fn get_balance(&self, address: &Address, block: BlockTag) -> Result<U256> { pub async fn get_balance(&self, address: &Address, block: BlockTag) -> Result<U256> {
@ -188,14 +287,14 @@ impl Node {
pub fn get_block_transaction_count_by_hash(&self, hash: &Vec<u8>) -> Result<u64> { pub fn get_block_transaction_count_by_hash(&self, hash: &Vec<u8>) -> Result<u64> {
let payload = self.get_payload_by_hash(hash)?; let payload = self.get_payload_by_hash(hash)?;
let transaction_count = payload.1.transactions().len(); let transaction_count = payload.1.transactions.len();
Ok(transaction_count as u64) Ok(transaction_count as u64)
} }
pub fn get_block_transaction_count_by_number(&self, block: BlockTag) -> Result<u64> { pub fn get_block_transaction_count_by_number(&self, block: BlockTag) -> Result<u64> {
let payload = self.get_payload(block)?; let payload = self.get_payload(block)?;
let transaction_count = payload.transactions().len(); let transaction_count = payload.transactions.len();
Ok(transaction_count as u64) Ok(transaction_count as u64)
} }
@ -248,18 +347,6 @@ impl Node {
.await .await
} }
pub async fn get_transaction_by_block_hash_and_index(
&self,
hash: &Vec<u8>,
index: usize,
) -> Result<Option<Transaction>> {
let payload = self.get_payload_by_hash(hash)?;
self.execution
.get_transaction_by_block_hash_and_index(payload.1, index)
.await
}
pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>> { pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>> {
self.execution.get_logs(filter, &self.payloads).await self.execution.get_logs(filter, &self.payloads).await
} }
@ -269,7 +356,7 @@ impl Node {
self.check_head_age()?; self.check_head_age()?;
let payload = self.get_payload(BlockTag::Latest)?; let payload = self.get_payload(BlockTag::Latest)?;
let base_fee = U256::from_little_endian(&payload.base_fee_per_gas().to_bytes_le()); let base_fee = U256::from_little_endian(&payload.base_fee_per_gas.to_bytes_le());
let tip = U256::from(10_u64.pow(9)); let tip = U256::from(10_u64.pow(9));
Ok(base_fee + tip) Ok(base_fee + tip)
} }
@ -284,7 +371,7 @@ impl Node {
self.check_head_age()?; self.check_head_age()?;
let payload = self.get_payload(BlockTag::Latest)?; let payload = self.get_payload(BlockTag::Latest)?;
Ok(*payload.block_number()) Ok(payload.block_number)
} }
pub async fn get_block_by_number( pub async fn get_block_by_number(
@ -300,17 +387,6 @@ impl Node {
} }
} }
pub async fn get_fee_history(
&self,
block_count: u64,
last_block: u64,
reward_percentiles: &[f64],
) -> Result<Option<FeeHistory>> {
self.execution
.get_fee_history(block_count, last_block, reward_percentiles, &self.payloads)
.await
}
pub async fn get_block_by_hash( pub async fn get_block_by_hash(
&self, &self,
hash: &Vec<u8>, hash: &Vec<u8>,
@ -328,49 +404,11 @@ impl Node {
self.config.chain.chain_id self.config.chain.chain_id
} }
pub fn syncing(&self) -> Result<SyncingStatus> {
if self.check_head_age().is_ok() {
Ok(SyncingStatus::IsFalse)
} else {
let latest_synced_block = self.get_block_number()?;
let oldest_payload = self.payloads.first_key_value();
let oldest_synced_block =
oldest_payload.map_or(latest_synced_block, |(key, _value)| *key);
let highest_block = self.consensus.expected_current_slot();
Ok(SyncingStatus::IsSyncing(Box::new(SyncProgress {
current_block: latest_synced_block.into(),
highest_block: highest_block.into(),
starting_block: oldest_synced_block.into(),
pulled_states: None,
known_states: None,
healed_bytecode_bytes: None,
healed_bytecodes: None,
healed_trienode_bytes: None,
healed_trienodes: None,
healing_bytecode: None,
healing_trienodes: None,
synced_account_bytes: None,
synced_accounts: None,
synced_bytecode_bytes: None,
synced_bytecodes: None,
synced_storage: None,
synced_storage_bytes: None,
})))
}
}
pub fn get_header(&self) -> Result<Header> { pub fn get_header(&self) -> Result<Header> {
self.check_head_age()?; self.check_head_age()?;
Ok(self.consensus.get_header().clone()) Ok(self.consensus.get_header().clone())
} }
pub fn get_coinbase(&self) -> Result<Address> {
self.check_head_age()?;
let payload = self.get_payload(BlockTag::Latest)?;
let coinbase_address = Address::from_slice(payload.fee_recipient());
Ok(coinbase_address)
}
pub fn get_last_checkpoint(&self) -> Option<Vec<u8>> { pub fn get_last_checkpoint(&self) -> Option<Vec<u8>> {
self.consensus.last_checkpoint.clone() self.consensus.last_checkpoint.clone()
} }
@ -398,7 +436,7 @@ impl Node {
let payloads = self let payloads = self
.payloads .payloads
.iter() .iter()
.filter(|entry| &entry.1.block_hash().to_vec() == hash) .filter(|entry| &entry.1.block_hash.to_vec() == hash)
.collect::<Vec<(&u64, &ExecutionPayload)>>(); .collect::<Vec<(&u64, &ExecutionPayload)>>();
payloads payloads


@ -1,6 +1,6 @@
use ethers::{ use ethers::{
abi::AbiEncode, abi::AbiEncode,
types::{Address, Filter, Log, SyncingStatus, Transaction, TransactionReceipt, H256, U256}, types::{Address, Filter, Log, Transaction, TransactionReceipt, H256, U256},
}; };
use eyre::Result; use eyre::Result;
use log::info; use log::info;
@ -97,12 +97,6 @@ trait EthRpc {
) -> Result<Option<TransactionReceipt>, Error>; ) -> Result<Option<TransactionReceipt>, Error>;
#[method(name = "getTransactionByHash")] #[method(name = "getTransactionByHash")]
async fn get_transaction_by_hash(&self, hash: &str) -> Result<Option<Transaction>, Error>; async fn get_transaction_by_hash(&self, hash: &str) -> Result<Option<Transaction>, Error>;
#[method(name = "getTransactionByBlockHashAndIndex")]
async fn get_transaction_by_block_hash_and_index(
&self,
hash: &str,
index: usize,
) -> Result<Option<Transaction>, Error>;
#[method(name = "getLogs")] #[method(name = "getLogs")]
async fn get_logs(&self, filter: Filter) -> Result<Vec<Log>, Error>; async fn get_logs(&self, filter: Filter) -> Result<Vec<Log>, Error>;
#[method(name = "getStorageAt")] #[method(name = "getStorageAt")]
@ -112,10 +106,6 @@ trait EthRpc {
slot: H256, slot: H256,
block: BlockTag, block: BlockTag,
) -> Result<String, Error>; ) -> Result<String, Error>;
#[method(name = "getCoinbase")]
async fn get_coinbase(&self) -> Result<Address, Error>;
#[method(name = "syncing")]
async fn syncing(&self) -> Result<SyncingStatus, Error>;
} }
#[rpc(client, server, namespace = "net")] #[rpc(client, server, namespace = "net")]
@ -262,29 +252,6 @@ impl EthRpcServer for RpcInner {
convert_err(node.get_transaction_by_hash(&hash).await) convert_err(node.get_transaction_by_hash(&hash).await)
} }
async fn get_transaction_by_block_hash_and_index(
&self,
hash: &str,
index: usize,
) -> Result<Option<Transaction>, Error> {
let hash = convert_err(hex_str_to_bytes(hash))?;
let node = self.node.read().await;
convert_err(
node.get_transaction_by_block_hash_and_index(&hash, index)
.await,
)
}
async fn get_coinbase(&self) -> Result<Address, Error> {
let node = self.node.read().await;
Ok(node.get_coinbase().unwrap())
}
async fn syncing(&self) -> Result<SyncingStatus, Error> {
let node = self.node.read().await;
convert_err(node.syncing())
}
async fn get_logs(&self, filter: Filter) -> Result<Vec<Log>, Error> { async fn get_logs(&self, filter: Filter) -> Result<Vec<Log>, Error> {
let node = self.node.read().await; let node = self.node.read().await;
convert_err(node.get_logs(&filter).await) convert_err(node.get_logs(&filter).await)
@ -342,11 +309,5 @@ fn format_hex(num: &U256) -> String {
.trim_start_matches('0') .trim_start_matches('0')
.to_string(); .to_string();
let stripped = if stripped.is_empty() {
"0".to_string()
} else {
stripped
};
format!("0x{stripped}") format!("0x{stripped}")
} }


@ -1,12 +1,14 @@
[package] [package]
name = "common" name = "common"
version = "0.3.0" version = "0.1.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
eyre = "0.6.8" eyre = "0.6.8"
serde = { version = "1.0.143", features = ["derive"] } serde = { version = "1.0.143", features = ["derive"] }
hex = "0.4.3" hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "d09f55b4f8554491e3431e01af1c32347a8781cd" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f18ca919cc1b685b861d0fa9e2daabe89737" }
ethers = "1.0.0" ethers = "1.0.2"
thiserror = "1.0.37" thiserror = "1.0.37"
libp2p-core = { version = "0.38.0", features = ["secp256k1"] }
discv5 = { version = "0.1.0", features = ["libp2p", "libp2p-core"] }

View File

@ -1,4 +1,3 @@
use ethers::types::H256;
use thiserror::Error; use thiserror::Error;
use crate::types::BlockTag; use crate::types::BlockTag;
@ -15,18 +14,6 @@ impl BlockNotFoundError {
} }
} }
#[derive(Debug, Error)]
#[error("slot not found: {slot:?}")]
pub struct SlotNotFoundError {
slot: H256,
}
impl SlotNotFoundError {
pub fn new(slot: H256) -> Self {
Self { slot }
}
}
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("rpc error on method: {method}, message: {error}")] #[error("rpc error on method: {method}, message: {error}")]
pub struct RpcError<E: ToString> { pub struct RpcError<E: ToString> {

View File

@ -1,5 +1,7 @@
use discv5::enr::{self, CombinedKey};
use ethers::prelude::Address; use ethers::prelude::Address;
use eyre::Result; use eyre::Result;
use libp2p_core::identity::Keypair;
use ssz_rs::{Node, Vector}; use ssz_rs::{Node, Vector};
use super::types::Bytes32; use super::types::Bytes32;
@ -24,3 +26,20 @@ pub fn address_to_hex_string(address: &Address) -> String {
pub fn u64_to_hex_string(val: u64) -> String { pub fn u64_to_hex_string(val: u64) -> String {
format!("0x{val:x}") format!("0x{val:x}")
} }
/// Transforms a [Keypair](libp2p_core::identity::Keypair) into a [CombinedKey].
pub fn from_libp2p(key: &libp2p_core::identity::Keypair) -> Result<CombinedKey, &'static str> {
match key {
Keypair::Secp256k1(key) => {
let secret = enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
Keypair::Ed25519(key) => {
let ed_keypair = enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32])
.expect("libp2p key must be valid");
Ok(CombinedKey::from(ed_keypair))
}
_ => Err("ENR: Unsupported libp2p key type"),
}
}
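
A hedged sketch of how this conversion would typically be used: derive one node identity with libp2p and reuse it to sign a discv5 ENR. The EnrBuilder calls reflect the enr API re-exported by discv5 0.1 and are an assumption here, as is calling the helper from outside the crate.

    use common::utils::from_libp2p;
    use discv5::enr::EnrBuilder;
    use libp2p_core::identity::Keypair;

    fn main() -> Result<(), &'static str> {
        // One ephemeral secp256k1 identity, generated the way libp2p would.
        let libp2p_key = Keypair::generate_secp256k1();

        // Convert it so the same identity can sign discv5 records.
        let enr_key = from_libp2p(&libp2p_key)?;

        // Build a minimal ENR with no IP/port fields, signed by that key.
        let enr = EnrBuilder::new("v4").build(&enr_key).expect("valid ENR");
        println!("local ENR: {enr}");
        Ok(())
    }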

View File

@ -1,24 +1,24 @@
[package] [package]
name = "config" name = "config"
version = "0.3.0" version = "0.1.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
tokio = { version = "1", features = ["full"] }
eyre = "0.6.8" eyre = "0.6.8"
serde = { version = "1.0.143", features = ["derive"] } serde = { version = "1.0.143", features = ["derive"] }
hex = "0.4.3" hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "d09f55b4f8554491e3431e01af1c32347a8781cd" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f18ca919cc1b685b861d0fa9e2daabe89737" }
ethers = "1.0.0" ethers = "1.0.2"
figment = { version = "0.10.7", features = ["toml", "env"] } figment = { version = "0.10.7", features = ["toml", "env"] }
thiserror = "1.0.37" thiserror = "1.0.37"
log = "0.4.17" log = "0.4.17"
common = { path = "../common" }
reqwest = "0.11.13" reqwest = "0.11.13"
serde_yaml = "0.9.14" serde_yaml = "0.9.14"
strum = "0.24.1" strum = "0.24.1"
futures = "0.3.25" futures = "0.3.25"
common = { path = "../common" }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
tokio = { version = "1", features = ["full"] }

View File

@ -12,7 +12,7 @@ pub struct BaseConfig {
deserialize_with = "bytes_deserialize", deserialize_with = "bytes_deserialize",
serialize_with = "bytes_serialize" serialize_with = "bytes_serialize"
)] )]
pub default_checkpoint: Vec<u8>, pub checkpoint: Vec<u8>,
pub chain: ChainConfig, pub chain: ChainConfig,
pub forks: Forks, pub forks: Forks,
pub max_checkpoint_age: u64, pub max_checkpoint_age: u64,

View File

@ -136,36 +136,28 @@ impl CheckpointFallback {
// Iterate over all mainnet checkpoint sync services and get the latest checkpoint slot for each. // Iterate over all mainnet checkpoint sync services and get the latest checkpoint slot for each.
let tasks: Vec<_> = services let tasks: Vec<_> = services
.iter() .iter()
.map(|service| async move { .map(|service| {
let service = service.clone(); let service = service.clone();
match Self::query_service(&service.endpoint).await { tokio::spawn(async move {
Some(raw) => { match Self::query_service(&service.endpoint).await {
if raw.data.slots.is_empty() { Some(raw) => {
return Err(eyre::eyre!("no slots")); if raw.data.slots.is_empty() {
return Err(eyre::eyre!("no slots"));
}
Ok(raw.data.slots[0].clone())
} }
None => Err(eyre::eyre!("failed to query service")),
let slot = raw
.data
.slots
.iter()
.find(|s| s.block_root.is_some())
.ok_or(eyre::eyre!("no valid slots"))?;
Ok(slot.clone())
} }
None => Err(eyre::eyre!("failed to query service")), })
}
}) })
.collect(); .collect();
let slots = futures::future::join_all(tasks) let slots = futures::future::join_all(tasks)
.await .await
.iter() .iter()
.filter_map(|slot| match &slot { .filter_map(|slot| match &slot {
Ok(s) => Some(s.clone()), Ok(Ok(s)) => Some(s.clone()),
_ => None, _ => None,
}) })
.filter(|s| s.block_root.is_some())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Get the max epoch // Get the max epoch
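
One side of this hunk spawns a tokio task per checkpoint service and has to double-unwrap Ok(Ok(s)), while the other awaits the request futures directly through join_all. A minimal self-contained sketch of the plain-futures shape, with a hypothetical Slot struct and query function standing in for the real checkpoint types:

    use futures::future::join_all;

    // Hypothetical stand-ins for the real checkpoint fallback types.
    #[derive(Clone, Debug)]
    struct Slot {
        epoch: u64,
        block_root: Option<String>,
    }

    async fn query(endpoint: &str) -> eyre::Result<Slot> {
        // Placeholder for CheckpointFallback::query_service; always succeeds here.
        let _ = endpoint;
        Ok(Slot { epoch: 1, block_root: Some("0xabc".into()) })
    }

    #[tokio::main]
    async fn main() {
        let endpoints = ["https://a.example", "https://b.example"];

        // No tokio::spawn: join_all drives the futures concurrently and yields
        // Result<Slot, _> directly, so the filter matches Ok(s), not Ok(Ok(s)).
        let futures: Vec<_> = endpoints.iter().map(|e| query(e)).collect();
        let slots: Vec<Slot> = join_all(futures)
            .await
            .into_iter()
            .filter_map(|res| res.ok())
            .filter(|s| s.block_root.is_some())
            .collect();

        println!("{} services returned a usable slot", slots.len());
    }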

View File

@ -13,7 +13,6 @@ pub struct CliConfig {
pub data_dir: PathBuf, pub data_dir: PathBuf,
pub fallback: Option<String>, pub fallback: Option<String>,
pub load_external_fallback: bool, pub load_external_fallback: bool,
pub strict_checkpoint_age: bool,
} }
impl CliConfig { impl CliConfig {
@ -47,11 +46,6 @@ impl CliConfig {
Value::from(self.load_external_fallback), Value::from(self.load_external_fallback),
); );
user_dict.insert(
"strict_checkpoint_age",
Value::from(self.strict_checkpoint_age),
);
Serialized::from(user_dict, network) Serialized::from(user_dict, network)
} }
} }

View File

@ -2,32 +2,31 @@ use figment::{
providers::{Format, Serialized, Toml}, providers::{Format, Serialized, Toml},
Figment, Figment,
}; };
use serde::Deserialize; use serde::{Deserialize, Serialize};
use std::{path::PathBuf, process::exit}; use std::{path::PathBuf, process::exit};
use crate::base::BaseConfig; use crate::base::BaseConfig;
use crate::cli::CliConfig; use crate::cli::CliConfig;
use crate::networks; use crate::networks;
use crate::types::{ChainConfig, Forks}; use crate::types::{ChainConfig, Forks};
use crate::utils::{bytes_deserialize, bytes_opt_deserialize}; use crate::utils::{bytes_deserialize, bytes_serialize};
#[derive(Deserialize, Debug, Default)] #[derive(Serialize, Deserialize, Debug, Default)]
pub struct Config { pub struct Config {
pub consensus_rpc: String, pub consensus_rpc: String,
pub execution_rpc: String, pub execution_rpc: String,
pub rpc_port: Option<u16>, pub rpc_port: Option<u16>,
#[serde(deserialize_with = "bytes_deserialize")] #[serde(
pub default_checkpoint: Vec<u8>, deserialize_with = "bytes_deserialize",
#[serde(default)] serialize_with = "bytes_serialize"
#[serde(deserialize_with = "bytes_opt_deserialize")] )]
pub checkpoint: Option<Vec<u8>>, pub checkpoint: Vec<u8>,
pub data_dir: Option<PathBuf>, pub data_dir: Option<PathBuf>,
pub chain: ChainConfig, pub chain: ChainConfig,
pub forks: Forks, pub forks: Forks,
pub max_checkpoint_age: u64, pub max_checkpoint_age: u64,
pub fallback: Option<String>, pub fallback: Option<String>,
pub load_external_fallback: bool, pub load_external_fallback: bool,
pub strict_checkpoint_age: bool,
} }
impl Config { impl Config {
@ -73,9 +72,7 @@ impl Config {
pub fn fork_version(&self, slot: u64) -> Vec<u8> { pub fn fork_version(&self, slot: u64) -> Vec<u8> {
let epoch = slot / 32; let epoch = slot / 32;
if epoch >= self.forks.capella.epoch { if epoch >= self.forks.bellatrix.epoch {
self.forks.capella.fork_version.clone()
} else if epoch >= self.forks.bellatrix.epoch {
self.forks.bellatrix.fork_version.clone() self.forks.bellatrix.fork_version.clone()
} else if epoch >= self.forks.altair.epoch { } else if epoch >= self.forks.altair.epoch {
self.forks.altair.fork_version.clone() self.forks.altair.fork_version.clone()
@ -88,7 +85,7 @@ impl Config {
BaseConfig { BaseConfig {
rpc_port: self.rpc_port.unwrap_or(8545), rpc_port: self.rpc_port.unwrap_or(8545),
consensus_rpc: Some(self.consensus_rpc.clone()), consensus_rpc: Some(self.consensus_rpc.clone()),
default_checkpoint: self.default_checkpoint.clone(), checkpoint: self.checkpoint.clone(),
chain: self.chain.clone(), chain: self.chain.clone(),
forks: self.forks.clone(), forks: self.forks.clone(),
max_checkpoint_age: self.max_checkpoint_age, max_checkpoint_age: self.max_checkpoint_age,
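
A worked check of the epoch arithmetic in fork_version above, using the mainnet numbers that appear in networks.rs later in this diff (bellatrix at epoch 144_896 with version 0x02000000):

    fn main() {
        let slot: u64 = 4_636_672;
        let epoch = slot / 32; // 32 slots per epoch
        assert_eq!(epoch, 144_896);
        // 144_896 is at the bellatrix threshold and below any later fork epoch,
        // so fork_version(slot) resolves to 0x02000000 for this slot.
        println!("slot {slot} -> epoch {epoch} -> fork version 0x02000000");
    }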

View File

@ -35,8 +35,8 @@ impl Network {
pub fn mainnet() -> BaseConfig { pub fn mainnet() -> BaseConfig {
BaseConfig { BaseConfig {
default_checkpoint: hex_str_to_bytes( checkpoint: hex_str_to_bytes(
"0x766647f3c4e1fc91c0db9a9374032ae038778411fbff222974e11f2e3ce7dadf", "0x428ce0b5f5bbed1fc2b3feb5d4152ae0fe98a80b1bfa8de36681868e81e9222a",
) )
.unwrap(), .unwrap(),
rpc_port: 8545, rpc_port: 8545,
@ -62,10 +62,6 @@ pub fn mainnet() -> BaseConfig {
epoch: 144896, epoch: 144896,
fork_version: hex_str_to_bytes("0x02000000").unwrap(), fork_version: hex_str_to_bytes("0x02000000").unwrap(),
}, },
capella: Fork {
epoch: u64::MAX, // TODO: set epoch when known
fork_version: hex_str_to_bytes("0x03000000").unwrap(),
},
}, },
max_checkpoint_age: 1_209_600, // 14 days max_checkpoint_age: 1_209_600, // 14 days
} }
@ -73,7 +69,7 @@ pub fn mainnet() -> BaseConfig {
pub fn goerli() -> BaseConfig { pub fn goerli() -> BaseConfig {
BaseConfig { BaseConfig {
default_checkpoint: hex_str_to_bytes( checkpoint: hex_str_to_bytes(
"0xd4344682866dbede543395ecf5adf9443a27f423a4b00f270458e7932686ced1", "0xd4344682866dbede543395ecf5adf9443a27f423a4b00f270458e7932686ced1",
) )
.unwrap(), .unwrap(),
@ -100,10 +96,6 @@ pub fn goerli() -> BaseConfig {
epoch: 112260, epoch: 112260,
fork_version: hex_str_to_bytes("0x02001020").unwrap(), fork_version: hex_str_to_bytes("0x02001020").unwrap(),
}, },
capella: Fork {
epoch: 162304,
fork_version: hex_str_to_bytes("0x03001020").unwrap(),
},
}, },
max_checkpoint_age: 1_209_600, // 14 days max_checkpoint_age: 1_209_600, // 14 days
} }

View File

@ -18,7 +18,6 @@ pub struct Forks {
pub genesis: Fork, pub genesis: Fork,
pub altair: Fork, pub altair: Fork,
pub bellatrix: Fork, pub bellatrix: Fork,
pub capella: Fork,
} }
#[derive(Serialize, Deserialize, Debug, Default, Clone)] #[derive(Serialize, Deserialize, Debug, Default, Clone)]

View File

@ -15,15 +15,3 @@ where
let bytes_string = hex::encode(bytes); let bytes_string = hex::encode(bytes);
serializer.serialize_str(&bytes_string) serializer.serialize_str(&bytes_string)
} }
pub fn bytes_opt_deserialize<'de, D>(deserializer: D) -> Result<Option<Vec<u8>>, D::Error>
where
D: serde::Deserializer<'de>,
{
let bytes_opt: Option<String> = serde::Deserialize::deserialize(deserializer)?;
if let Some(bytes) = bytes_opt {
Ok(Some(hex_str_to_bytes(&bytes).unwrap()))
} else {
Ok(None)
}
}

View File

@ -1,33 +1,27 @@
[package] [package]
name = "consensus" name = "consensus"
version = "0.3.0" version = "0.1.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
tokio = { version = "1", features = ["full"] }
eyre = "0.6.8" eyre = "0.6.8"
futures = "0.3.23"
serde = { version = "1.0.143", features = ["derive"] } serde = { version = "1.0.143", features = ["derive"] }
serde_json = "1.0.85" serde_json = "1.0.85"
hex = "0.4.3" hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "d09f55b4f8554491e3431e01af1c32347a8781cd" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f18ca919cc1b685b861d0fa9e2daabe89737" }
milagro_bls = { git = "https://github.com/Snowfork/milagro_bls" } blst = "0.3.10"
ethers = "1.0.0" ethers = "1.0.2"
bytes = "1.2.1" bytes = "1.2.1"
toml = "0.5.9" toml = "0.5.9"
async-trait = "0.1.57" async-trait = "0.1.57"
log = "0.4.17" log = "0.4.17"
chrono = "0.4.23" chrono = "0.4.22"
thiserror = "1.0.37" thiserror = "1.0.37"
reqwest = { version = "0.11.13", features = ["json"] } openssl = { version = "0.10", features = ["vendored"] }
superstruct = "0.7.0" reqwest = { version = "0.11.12", features = ["json"] }
reqwest-middleware = "0.1.6"
reqwest-retry = "0.1.5"
common = { path = "../common" } common = { path = "../common" }
config = { path = "../config" } config = { path = "../config" }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
openssl = { version = "0.10", features = ["vendored"] }
tokio = { version = "1", features = ["full"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-timer = "0.2.5"

View File

@ -1,13 +1,12 @@
use std::cmp; use std::cmp;
use std::sync::Arc; use std::sync::Arc;
use std::time::UNIX_EPOCH;
use blst::min_pk::PublicKey;
use chrono::Duration; use chrono::Duration;
use eyre::eyre; use eyre::eyre;
use eyre::Result; use eyre::Result;
use futures::future::join_all;
use log::warn;
use log::{debug, info}; use log::{debug, info};
use milagro_bls::PublicKey;
use ssz_rs::prelude::*; use ssz_rs::prelude::*;
use common::types::*; use common::types::*;
@ -21,20 +20,9 @@ use super::rpc::ConsensusRpc;
use super::types::*; use super::types::*;
use super::utils::*; use super::utils::*;
#[cfg(not(target_arch = "wasm32"))]
use std::time::SystemTime;
#[cfg(not(target_arch = "wasm32"))]
use std::time::UNIX_EPOCH;
#[cfg(target_arch = "wasm32")]
use wasm_timer::SystemTime;
#[cfg(target_arch = "wasm32")]
use wasm_timer::UNIX_EPOCH;
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md
// does not implement force updates // does not implement force updates
#[derive(Debug)]
pub struct ConsensusClient<R: ConsensusRpc> { pub struct ConsensusClient<R: ConsensusRpc> {
rpc: R, rpc: R,
store: LightClientStore, store: LightClientStore,
@ -70,16 +58,6 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
}) })
} }
pub async fn check_rpc(&self) -> Result<()> {
let chain_id = self.rpc.chain_id().await?;
if chain_id != self.config.chain.chain_id {
Err(ConsensusError::IncorrectRpcNetwork.into())
} else {
Ok(())
}
}
pub async fn get_execution_payload(&self, slot: &Option<u64>) -> Result<ExecutionPayload> { pub async fn get_execution_payload(&self, slot: &Option<u64>) -> Result<ExecutionPayload> {
let slot = slot.unwrap_or(self.store.optimistic_header.slot); let slot = slot.unwrap_or(self.store.optimistic_header.slot);
let mut block = self.rpc.get_block(slot).await?; let mut block = self.rpc.get_block(slot).await?;
@ -103,50 +81,10 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
) )
.into()) .into())
} else { } else {
Ok(block.body.execution_payload().clone()) Ok(block.body.execution_payload)
} }
} }
pub async fn get_payloads(
&self,
start_slot: u64,
end_slot: u64,
) -> Result<Vec<ExecutionPayload>> {
let payloads_fut = (start_slot..end_slot)
.rev()
.map(|slot| self.rpc.get_block(slot));
let mut prev_parent_hash: Bytes32 = self
.rpc
.get_block(end_slot)
.await?
.body
.execution_payload()
.parent_hash()
.clone();
let mut payloads: Vec<ExecutionPayload> = Vec::new();
for result in join_all(payloads_fut).await {
if result.is_err() {
continue;
}
let payload = result.unwrap().body.execution_payload().clone();
if payload.block_hash() != &prev_parent_hash {
warn!(
"error while backfilling blocks: {}",
ConsensusError::InvalidHeaderHash(
format!("{prev_parent_hash:02X?}"),
format!("{:02X?}", payload.parent_hash()),
)
);
break;
}
prev_parent_hash = payload.parent_hash().clone();
payloads.push(payload);
}
Ok(payloads)
}
pub fn get_header(&self) -> &Header { pub fn get_header(&self) -> &Header {
&self.store.optimistic_header &self.store.optimistic_header
} }
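
A hedged usage sketch for the get_payloads backfill helper above. It assumes a ConsensusClient that has already completed sync(); the import paths and the 64-slot window are illustrative rather than taken from this diff.

    use consensus::{rpc::ConsensusRpc, ConsensusClient};
    use eyre::Result;

    // Backfill roughly the last 64 slots behind the optimistic head.
    async fn backfill_recent<R: ConsensusRpc>(client: &ConsensusClient<R>) -> Result<()> {
        let head_slot = client.get_header().slot;
        let payloads = client
            .get_payloads(head_slot.saturating_sub(64), head_slot)
            .await?;
        // Payloads arrive newest-first, and the helper stops early if a
        // parent-hash link between consecutive blocks does not match.
        for payload in &payloads {
            println!("backfilled block {}", payload.block_number());
        }
        Ok(())
    }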
@ -156,6 +94,10 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
} }
pub async fn sync(&mut self) -> Result<()> { pub async fn sync(&mut self) -> Result<()> {
info!(
"Consensus client in sync with checkpoint: 0x{}",
hex::encode(&self.initial_checkpoint)
);
self.bootstrap().await?; self.bootstrap().await?;
let current_period = calc_sync_period(self.store.finalized_header.slot); let current_period = calc_sync_period(self.store.finalized_header.slot);
@ -177,11 +119,6 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
self.verify_optimistic_update(&optimistic_update)?; self.verify_optimistic_update(&optimistic_update)?;
self.apply_optimistic_update(&optimistic_update); self.apply_optimistic_update(&optimistic_update);
info!(
"consensus client in sync with checkpoint: 0x{}",
hex::encode(&self.initial_checkpoint)
);
Ok(()) Ok(())
} }
@ -221,13 +158,8 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
.map_err(|_| eyre!("could not fetch bootstrap"))?; .map_err(|_| eyre!("could not fetch bootstrap"))?;
let is_valid = self.is_valid_checkpoint(bootstrap.header.slot); let is_valid = self.is_valid_checkpoint(bootstrap.header.slot);
if !is_valid { if !is_valid {
if self.config.strict_checkpoint_age { return Err(ConsensusError::CheckpointTooOld.into());
return Err(ConsensusError::CheckpointTooOld.into());
} else {
warn!("checkpoint too old, consider using a more recent block");
}
} }
let committee_valid = is_current_committee_proof_valid( let committee_valid = is_current_committee_proof_valid(
@ -531,13 +463,18 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
fn age(&self, slot: u64) -> Duration { fn age(&self, slot: u64) -> Duration {
let expected_time = self.slot_timestamp(slot); let expected_time = self.slot_timestamp(slot);
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let now = std::time::SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap();
let delay = now - std::time::Duration::from_secs(expected_time); let delay = now - std::time::Duration::from_secs(expected_time);
chrono::Duration::from_std(delay).unwrap() chrono::Duration::from_std(delay).unwrap()
} }
pub fn expected_current_slot(&self) -> u64 { pub fn expected_current_slot(&self) -> u64 {
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let now = std::time::SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap();
let genesis_time = self.config.chain.genesis_time; let genesis_time = self.config.chain.genesis_time;
let since_genesis = now - std::time::Duration::from_secs(genesis_time); let since_genesis = now - std::time::Duration::from_secs(genesis_time);
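
A worked example of the wall-clock arithmetic behind expected_current_slot. The mainnet genesis_time of 1_606_824_023 and the 12-second slot interval are general chain parameters, not values taken from this hunk:

    fn main() {
        let genesis_time: u64 = 1_606_824_023; // mainnet beacon chain genesis
        let now: u64 = 1_671_000_000;          // an arbitrary later instant, in seconds
        let since_genesis = now - genesis_time;
        let expected_slot = since_genesis / 12; // one slot every 12 seconds
        println!("~{expected_slot} slots since genesis"); // ~5_347_998 here
    }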
@ -555,7 +492,7 @@ impl<R: ConsensusRpc> ConsensusClient<R> {
let next_slot = current_slot + 1; let next_slot = current_slot + 1;
let next_slot_timestamp = self.slot_timestamp(next_slot); let next_slot_timestamp = self.slot_timestamp(next_slot);
let now = SystemTime::now() let now = std::time::SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.unwrap() .unwrap()
.as_secs(); .as_secs();
@ -586,7 +523,7 @@ fn get_participating_keys(
bitfield.iter().enumerate().for_each(|(i, bit)| { bitfield.iter().enumerate().for_each(|(i, bit)| {
if bit == true { if bit == true {
let pk = &committee.pubkeys[i]; let pk = &committee.pubkeys[i];
let pk = PublicKey::from_bytes_unchecked(pk).unwrap(); let pk = PublicKey::from_bytes(pk).unwrap();
pks.push(pk); pks.push(pk);
} }
}); });
@ -657,14 +594,14 @@ mod tests {
}; };
use config::{networks, Config}; use config::{networks, Config};
async fn get_client(strict_checkpoint_age: bool) -> ConsensusClient<MockRpc> { async fn get_client(large_checkpoint_age: bool) -> ConsensusClient<MockRpc> {
let base_config = networks::goerli(); let base_config = networks::goerli();
let config = Config { let config = Config {
consensus_rpc: String::new(), consensus_rpc: String::new(),
execution_rpc: String::new(), execution_rpc: String::new(),
chain: base_config.chain, chain: base_config.chain,
forks: base_config.forks, forks: base_config.forks,
strict_checkpoint_age, max_checkpoint_age: if large_checkpoint_age { 123123123 } else { 123 },
..Default::default() ..Default::default()
}; };
@ -679,7 +616,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_update() { async fn test_verify_update() {
let client = get_client(false).await; let client = get_client(true).await;
let period = calc_sync_period(client.store.finalized_header.slot); let period = calc_sync_period(client.store.finalized_header.slot);
let updates = client let updates = client
.rpc .rpc
@ -693,7 +630,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_update_invalid_committee() { async fn test_verify_update_invalid_committee() {
let client = get_client(false).await; let client = get_client(true).await;
let period = calc_sync_period(client.store.finalized_header.slot); let period = calc_sync_period(client.store.finalized_header.slot);
let updates = client let updates = client
.rpc .rpc
@ -713,7 +650,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_update_invalid_finality() { async fn test_verify_update_invalid_finality() {
let client = get_client(false).await; let client = get_client(true).await;
let period = calc_sync_period(client.store.finalized_header.slot); let period = calc_sync_period(client.store.finalized_header.slot);
let updates = client let updates = client
.rpc .rpc
@ -733,7 +670,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_update_invalid_sig() { async fn test_verify_update_invalid_sig() {
let client = get_client(false).await; let client = get_client(true).await;
let period = calc_sync_period(client.store.finalized_header.slot); let period = calc_sync_period(client.store.finalized_header.slot);
let updates = client let updates = client
.rpc .rpc
@ -753,7 +690,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_finality() { async fn test_verify_finality() {
let mut client = get_client(false).await; let mut client = get_client(true).await;
client.sync().await.unwrap(); client.sync().await.unwrap();
let update = client.rpc.get_finality_update().await.unwrap(); let update = client.rpc.get_finality_update().await.unwrap();
@ -763,7 +700,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_finality_invalid_finality() { async fn test_verify_finality_invalid_finality() {
let mut client = get_client(false).await; let mut client = get_client(true).await;
client.sync().await.unwrap(); client.sync().await.unwrap();
let mut update = client.rpc.get_finality_update().await.unwrap(); let mut update = client.rpc.get_finality_update().await.unwrap();
@ -778,7 +715,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_finality_invalid_sig() { async fn test_verify_finality_invalid_sig() {
let mut client = get_client(false).await; let mut client = get_client(true).await;
client.sync().await.unwrap(); client.sync().await.unwrap();
let mut update = client.rpc.get_finality_update().await.unwrap(); let mut update = client.rpc.get_finality_update().await.unwrap();
@ -793,7 +730,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_optimistic() { async fn test_verify_optimistic() {
let mut client = get_client(false).await; let mut client = get_client(true).await;
client.sync().await.unwrap(); client.sync().await.unwrap();
let update = client.rpc.get_optimistic_update().await.unwrap(); let update = client.rpc.get_optimistic_update().await.unwrap();
@ -802,7 +739,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_verify_optimistic_invalid_sig() { async fn test_verify_optimistic_invalid_sig() {
let mut client = get_client(false).await; let mut client = get_client(true).await;
client.sync().await.unwrap(); client.sync().await.unwrap();
let mut update = client.rpc.get_optimistic_update().await.unwrap(); let mut update = client.rpc.get_optimistic_update().await.unwrap();
@ -818,6 +755,6 @@ mod tests {
#[tokio::test] #[tokio::test]
#[should_panic] #[should_panic]
async fn test_verify_checkpoint_age_invalid() { async fn test_verify_checkpoint_age_invalid() {
get_client(true).await; get_client(false).await;
} }
} }

View File

@ -24,6 +24,4 @@ pub enum ConsensusError {
PayloadNotFound(u64), PayloadNotFound(u64),
#[error("checkpoint is too old")] #[error("checkpoint is too old")]
CheckpointTooOld, CheckpointTooOld,
#[error("consensus rpc is for the incorrect network")]
IncorrectRpcNetwork,
} }

View File

@ -1,15 +1,16 @@
use std::{fs::read_to_string, path::PathBuf}; use std::{fs::read_to_string, path::PathBuf};
use async_trait::async_trait;
use eyre::Result;
use super::ConsensusRpc; use super::ConsensusRpc;
use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update}; use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update};
use async_trait::async_trait;
use eyre::Result;
pub struct MockRpc { pub struct MockRpc {
testdata: PathBuf, testdata: PathBuf,
} }
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
impl ConsensusRpc for MockRpc { impl ConsensusRpc for MockRpc {
fn new(path: &str) -> Self { fn new(path: &str) -> Self {
MockRpc { MockRpc {
@ -41,8 +42,4 @@ impl ConsensusRpc for MockRpc {
let block = read_to_string(self.testdata.join("blocks.json"))?; let block = read_to_string(self.testdata.join("blocks.json"))?;
Ok(serde_json::from_str(&block)?) Ok(serde_json::from_str(&block)?)
} }
async fn chain_id(&self) -> Result<u64> {
eyre::bail!("not implemented")
}
} }

View File

@ -7,8 +7,7 @@ use eyre::Result;
use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update}; use crate::types::{BeaconBlock, Bootstrap, FinalityUpdate, OptimisticUpdate, Update};
// implements https://github.com/ethereum/beacon-APIs/tree/master/apis/beacon/light_client // implements https://github.com/ethereum/beacon-APIs/tree/master/apis/beacon/light_client
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
pub trait ConsensusRpc { pub trait ConsensusRpc {
fn new(path: &str) -> Self; fn new(path: &str) -> Self;
async fn get_bootstrap(&self, block_root: &'_ [u8]) -> Result<Bootstrap>; async fn get_bootstrap(&self, block_root: &'_ [u8]) -> Result<Bootstrap>;
@ -16,5 +15,4 @@ pub trait ConsensusRpc {
async fn get_finality_update(&self) -> Result<FinalityUpdate>; async fn get_finality_update(&self) -> Result<FinalityUpdate>;
async fn get_optimistic_update(&self) -> Result<OptimisticUpdate>; async fn get_optimistic_update(&self) -> Result<OptimisticUpdate>;
async fn get_block(&self, slot: u64) -> Result<BeaconBlock>; async fn get_block(&self, slot: u64) -> Result<BeaconBlock>;
async fn chain_id(&self) -> Result<u64>;
} }

View File

@ -1,23 +1,33 @@
use async_trait::async_trait; use async_trait::async_trait;
use common::errors::RpcError;
use eyre::Result; use eyre::Result;
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
use std::cmp; use std::cmp;
use super::ConsensusRpc; use super::ConsensusRpc;
use crate::constants::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use crate::constants::MAX_REQUEST_LIGHT_CLIENT_UPDATES;
use crate::types::*; use crate::types::*;
use common::errors::RpcError;
#[derive(Debug)]
pub struct NimbusRpc { pub struct NimbusRpc {
rpc: String, rpc: String,
client: ClientWithMiddleware,
} }
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
impl ConsensusRpc for NimbusRpc { impl ConsensusRpc for NimbusRpc {
fn new(rpc: &str) -> Self { fn new(rpc: &str) -> Self {
let retry_policy = ExponentialBackoff::builder()
.backoff_exponent(1)
.build_with_max_retries(3);
let client = ClientBuilder::new(reqwest::Client::new())
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
.build();
NimbusRpc { NimbusRpc {
rpc: rpc.to_string(), rpc: rpc.to_string(),
client,
} }
} }
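
A hedged usage sketch for the client constructed above: with the retry middleware attached, each call retries transient HTTP failures a few times before an error reaches the caller. The module path is an assumption for illustration; the endpoint is the one used in the repo's examples.

    use consensus::rpc::{nimbus_rpc::NimbusRpc, ConsensusRpc};

    #[tokio::main]
    async fn main() -> eyre::Result<()> {
        // Construct the client against a public light-client data provider.
        let rpc = NimbusRpc::new("https://www.lightclientdata.org");

        // One call; transient failures are retried by the middleware before erroring.
        let finality_update = rpc.get_finality_update().await?;
        println!("attested header slot: {}", finality_update.attested_header.slot);
        Ok(())
    }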
@ -28,8 +38,8 @@ impl ConsensusRpc for NimbusRpc {
self.rpc, root_hex self.rpc, root_hex
); );
let client = reqwest::Client::new(); let res = self
let res = client .client
.get(req) .get(req)
.send() .send()
.await .await
@ -48,8 +58,8 @@ impl ConsensusRpc for NimbusRpc {
self.rpc, period, count self.rpc, period, count
); );
let client = reqwest::Client::new(); let res = self
let res = client .client
.get(req) .get(req)
.send() .send()
.await .await
@ -63,7 +73,10 @@ impl ConsensusRpc for NimbusRpc {
async fn get_finality_update(&self) -> Result<FinalityUpdate> { async fn get_finality_update(&self) -> Result<FinalityUpdate> {
let req = format!("{}/eth/v1/beacon/light_client/finality_update", self.rpc); let req = format!("{}/eth/v1/beacon/light_client/finality_update", self.rpc);
let res = reqwest::get(req) let res = self
.client
.get(req)
.send()
.await .await
.map_err(|e| RpcError::new("finality_update", e))? .map_err(|e| RpcError::new("finality_update", e))?
.json::<FinalityUpdateResponse>() .json::<FinalityUpdateResponse>()
@ -75,7 +88,10 @@ impl ConsensusRpc for NimbusRpc {
async fn get_optimistic_update(&self) -> Result<OptimisticUpdate> { async fn get_optimistic_update(&self) -> Result<OptimisticUpdate> {
let req = format!("{}/eth/v1/beacon/light_client/optimistic_update", self.rpc); let req = format!("{}/eth/v1/beacon/light_client/optimistic_update", self.rpc);
let res = reqwest::get(req) let res = self
.client
.get(req)
.send()
.await .await
.map_err(|e| RpcError::new("optimistic_update", e))? .map_err(|e| RpcError::new("optimistic_update", e))?
.json::<OptimisticUpdateResponse>() .json::<OptimisticUpdateResponse>()
@ -87,7 +103,10 @@ impl ConsensusRpc for NimbusRpc {
async fn get_block(&self, slot: u64) -> Result<BeaconBlock> { async fn get_block(&self, slot: u64) -> Result<BeaconBlock> {
let req = format!("{}/eth/v2/beacon/blocks/{}", self.rpc, slot); let req = format!("{}/eth/v2/beacon/blocks/{}", self.rpc, slot);
let res = reqwest::get(req) let res = self
.client
.get(req)
.send()
.await .await
.map_err(|e| RpcError::new("blocks", e))? .map_err(|e| RpcError::new("blocks", e))?
.json::<BeaconBlockResponse>() .json::<BeaconBlockResponse>()
@ -96,18 +115,6 @@ impl ConsensusRpc for NimbusRpc {
Ok(res.data.message) Ok(res.data.message)
} }
async fn chain_id(&self) -> Result<u64> {
let req = format!("{}/eth/v1/config/spec", self.rpc);
let res = reqwest::get(req)
.await
.map_err(|e| RpcError::new("spec", e))?
.json::<SpecResponse>()
.await
.map_err(|e| RpcError::new("spec", e))?;
Ok(res.data.chain_id)
}
} }
#[derive(serde::Deserialize, Debug)] #[derive(serde::Deserialize, Debug)]
@ -141,14 +148,3 @@ struct OptimisticUpdateResponse {
struct BootstrapResponse { struct BootstrapResponse {
data: Bootstrap, data: Bootstrap,
} }
#[derive(serde::Deserialize, Debug)]
struct SpecResponse {
data: Spec,
}
#[derive(serde::Deserialize, Debug)]
struct Spec {
#[serde(rename = "DEPOSIT_NETWORK_ID", deserialize_with = "u64_deserialize")]
chain_id: u64,
}
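
The SpecResponse/Spec pair above exists because the beacon node's /eth/v1/config/spec endpoint reports numeric values as strings and exposes the chain id under the DEPOSIT_NETWORK_ID key. A self-contained sketch of that shape, using a local mirror struct and a simplified string-to-u64 deserializer standing in for the crate's u64_deserialize:

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct Spec {
        #[serde(rename = "DEPOSIT_NETWORK_ID", deserialize_with = "u64_from_str")]
        chain_id: u64,
    }

    fn u64_from_str<'de, D: serde::Deserializer<'de>>(d: D) -> Result<u64, D::Error> {
        let s: String = Deserialize::deserialize(d)?;
        s.parse().map_err(serde::de::Error::custom)
    }

    fn main() {
        // Trimmed response body: the real endpoint nests many more fields under "data".
        let body = r#"{ "DEPOSIT_NETWORK_ID": "1" }"#;
        let spec: Spec = serde_json::from_str(body).unwrap();
        assert_eq!(spec.chain_id, 1); // mainnet
    }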

View File

@ -4,7 +4,6 @@ use ssz_rs::prelude::*;
use common::types::Bytes32; use common::types::Bytes32;
use common::utils::hex_str_to_bytes; use common::utils::hex_str_to_bytes;
use superstruct::superstruct;
pub type BLSPubKey = Vector<u8, 48>; pub type BLSPubKey = Vector<u8, 48>;
pub type SignatureBytes = Vector<u8, 96>; pub type SignatureBytes = Vector<u8, 96>;
@ -25,15 +24,7 @@ pub struct BeaconBlock {
pub body: BeaconBlockBody, pub body: BeaconBlockBody,
} }
#[superstruct( #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
variants(Bellatrix, Capella),
variant_attributes(
derive(serde::Deserialize, Clone, Debug, SimpleSerialize, Default),
serde(deny_unknown_fields)
)
)]
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
pub struct BeaconBlockBody { pub struct BeaconBlockBody {
#[serde(deserialize_with = "signature_deserialize")] #[serde(deserialize_with = "signature_deserialize")]
randao_reveal: SignatureBytes, randao_reveal: SignatureBytes,
@ -47,79 +38,9 @@ pub struct BeaconBlockBody {
voluntary_exits: List<SignedVoluntaryExit, 16>, voluntary_exits: List<SignedVoluntaryExit, 16>,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
pub execution_payload: ExecutionPayload, pub execution_payload: ExecutionPayload,
#[superstruct(only(Capella))]
bls_to_execution_changes: List<SignedBlsToExecutionChange, 16>,
} }
impl ssz_rs::Merkleized for BeaconBlockBody { #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
fn hash_tree_root(&mut self) -> Result<Node, MerkleizationError> {
match self {
BeaconBlockBody::Bellatrix(body) => body.hash_tree_root(),
BeaconBlockBody::Capella(body) => body.hash_tree_root(),
}
}
}
impl ssz_rs::Sized for BeaconBlockBody {
fn is_variable_size() -> bool {
true
}
fn size_hint() -> usize {
0
}
}
impl ssz_rs::Serialize for BeaconBlockBody {
fn serialize(&self, buffer: &mut Vec<u8>) -> Result<usize, SerializeError> {
match self {
BeaconBlockBody::Bellatrix(body) => body.serialize(buffer),
BeaconBlockBody::Capella(body) => body.serialize(buffer),
}
}
}
impl ssz_rs::Deserialize for BeaconBlockBody {
fn deserialize(_encoding: &[u8]) -> Result<Self, DeserializeError>
where
Self: Sized,
{
panic!("not implemented");
}
}
#[derive(Default, Clone, Debug, SimpleSerialize, serde::Deserialize)]
pub struct SignedBlsToExecutionChange {
message: BlsToExecutionChange,
#[serde(deserialize_with = "signature_deserialize")]
signature: SignatureBytes,
}
#[derive(Default, Clone, Debug, SimpleSerialize, serde::Deserialize)]
pub struct BlsToExecutionChange {
#[serde(deserialize_with = "u64_deserialize")]
validator_index: u64,
#[serde(deserialize_with = "pubkey_deserialize")]
from_bls_pubkey: BLSPubKey,
#[serde(deserialize_with = "address_deserialize")]
to_execution_address: Address,
}
impl Default for BeaconBlockBody {
fn default() -> Self {
BeaconBlockBody::Bellatrix(BeaconBlockBodyBellatrix::default())
}
}
#[superstruct(
variants(Bellatrix, Capella),
variant_attributes(
derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone),
serde(deny_unknown_fields)
)
)]
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
pub struct ExecutionPayload { pub struct ExecutionPayload {
#[serde(deserialize_with = "bytes32_deserialize")] #[serde(deserialize_with = "bytes32_deserialize")]
pub parent_hash: Bytes32, pub parent_hash: Bytes32,
@ -149,67 +70,10 @@ pub struct ExecutionPayload {
pub block_hash: Bytes32, pub block_hash: Bytes32,
#[serde(deserialize_with = "transactions_deserialize")] #[serde(deserialize_with = "transactions_deserialize")]
pub transactions: List<Transaction, 1048576>, pub transactions: List<Transaction, 1048576>,
#[superstruct(only(Capella))]
withdrawals: List<Withdrawal, 16>,
}
#[derive(Default, Clone, Debug, SimpleSerialize, serde::Deserialize)]
pub struct Withdrawal {
#[serde(deserialize_with = "u64_deserialize")]
index: u64,
#[serde(deserialize_with = "u64_deserialize")]
validator_index: u64,
#[serde(deserialize_with = "address_deserialize")]
address: Address,
#[serde(deserialize_with = "u64_deserialize")]
amount: u64,
}
impl ssz_rs::Merkleized for ExecutionPayload {
fn hash_tree_root(&mut self) -> Result<Node, MerkleizationError> {
match self {
ExecutionPayload::Bellatrix(payload) => payload.hash_tree_root(),
ExecutionPayload::Capella(payload) => payload.hash_tree_root(),
}
}
}
impl ssz_rs::Sized for ExecutionPayload {
fn is_variable_size() -> bool {
true
}
fn size_hint() -> usize {
0
}
}
impl ssz_rs::Serialize for ExecutionPayload {
fn serialize(&self, buffer: &mut Vec<u8>) -> Result<usize, SerializeError> {
match self {
ExecutionPayload::Bellatrix(payload) => payload.serialize(buffer),
ExecutionPayload::Capella(payload) => payload.serialize(buffer),
}
}
}
impl ssz_rs::Deserialize for ExecutionPayload {
fn deserialize(_encoding: &[u8]) -> Result<Self, DeserializeError>
where
Self: Sized,
{
panic!("not implemented");
}
}
impl Default for ExecutionPayload {
fn default() -> Self {
ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default())
}
} }
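
A hedged sketch of how callers work with the superstruct enums defined above: variant-agnostic fields go through the generated getters (as the rest of this diff does with block_number() and block_hash()), while Capella-only data such as withdrawals needs an explicit match on the variant.

    use consensus::types::{ExecutionPayload, ExecutionPayloadBellatrix};

    fn describe(payload: &ExecutionPayload) {
        // Getters generated by superstruct exist on every variant.
        println!("block {}", payload.block_number());

        // Capella-only fields live behind the variant.
        if let ExecutionPayload::Capella(capella) = payload {
            let _ = capella; // inspect withdrawals and other Capella-only data here
        }
    }

    fn main() {
        let payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default());
        describe(&payload);
    }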
#[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)] #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
pub struct ProposerSlashing { struct ProposerSlashing {
signed_header_1: SignedBeaconBlockHeader, signed_header_1: SignedBeaconBlockHeader,
signed_header_2: SignedBeaconBlockHeader, signed_header_2: SignedBeaconBlockHeader,
} }
@ -236,7 +100,7 @@ struct BeaconBlockHeader {
} }
#[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)] #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
pub struct AttesterSlashing { struct AttesterSlashing {
attestation_1: IndexedAttestation, attestation_1: IndexedAttestation,
attestation_2: IndexedAttestation, attestation_2: IndexedAttestation,
} }
@ -251,7 +115,7 @@ struct IndexedAttestation {
} }
#[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)] #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
pub struct Attestation { struct Attestation {
aggregation_bits: Bitlist<2048>, aggregation_bits: Bitlist<2048>,
data: AttestationData, data: AttestationData,
#[serde(deserialize_with = "signature_deserialize")] #[serde(deserialize_with = "signature_deserialize")]
@ -279,7 +143,7 @@ struct Checkpoint {
} }
#[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)] #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
pub struct SignedVoluntaryExit { struct SignedVoluntaryExit {
message: VoluntaryExit, message: VoluntaryExit,
#[serde(deserialize_with = "signature_deserialize")] #[serde(deserialize_with = "signature_deserialize")]
signature: SignatureBytes, signature: SignatureBytes,
@ -294,7 +158,7 @@ struct VoluntaryExit {
} }
#[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)] #[derive(serde::Deserialize, Debug, Default, SimpleSerialize, Clone)]
pub struct Deposit { struct Deposit {
#[serde(deserialize_with = "bytes_vector_deserialize")] #[serde(deserialize_with = "bytes_vector_deserialize")]
proof: Vector<Bytes32, 33>, proof: Vector<Bytes32, 33>,
data: DepositData, data: DepositData,
@ -324,7 +188,6 @@ pub struct Eth1Data {
#[derive(serde::Deserialize, Debug)] #[derive(serde::Deserialize, Debug)]
pub struct Bootstrap { pub struct Bootstrap {
#[serde(deserialize_with = "header_deserialize")]
pub header: Header, pub header: Header,
pub current_sync_committee: SyncCommittee, pub current_sync_committee: SyncCommittee,
#[serde(deserialize_with = "branch_deserialize")] #[serde(deserialize_with = "branch_deserialize")]
@ -333,12 +196,10 @@ pub struct Bootstrap {
#[derive(serde::Deserialize, Debug, Clone)] #[derive(serde::Deserialize, Debug, Clone)]
pub struct Update { pub struct Update {
#[serde(deserialize_with = "header_deserialize")]
pub attested_header: Header, pub attested_header: Header,
pub next_sync_committee: SyncCommittee, pub next_sync_committee: SyncCommittee,
#[serde(deserialize_with = "branch_deserialize")] #[serde(deserialize_with = "branch_deserialize")]
pub next_sync_committee_branch: Vec<Bytes32>, pub next_sync_committee_branch: Vec<Bytes32>,
#[serde(deserialize_with = "header_deserialize")]
pub finalized_header: Header, pub finalized_header: Header,
#[serde(deserialize_with = "branch_deserialize")] #[serde(deserialize_with = "branch_deserialize")]
pub finality_branch: Vec<Bytes32>, pub finality_branch: Vec<Bytes32>,
@ -349,9 +210,7 @@ pub struct Update {
#[derive(serde::Deserialize, Debug)] #[derive(serde::Deserialize, Debug)]
pub struct FinalityUpdate { pub struct FinalityUpdate {
#[serde(deserialize_with = "header_deserialize")]
pub attested_header: Header, pub attested_header: Header,
#[serde(deserialize_with = "header_deserialize")]
pub finalized_header: Header, pub finalized_header: Header,
#[serde(deserialize_with = "branch_deserialize")] #[serde(deserialize_with = "branch_deserialize")]
pub finality_branch: Vec<Bytes32>, pub finality_branch: Vec<Bytes32>,
@ -362,7 +221,6 @@ pub struct FinalityUpdate {
#[derive(serde::Deserialize, Debug)] #[derive(serde::Deserialize, Debug)]
pub struct OptimisticUpdate { pub struct OptimisticUpdate {
#[serde(deserialize_with = "header_deserialize")]
pub attested_header: Header, pub attested_header: Header,
pub sync_aggregate: SyncAggregate, pub sync_aggregate: SyncAggregate,
#[serde(deserialize_with = "u64_deserialize")] #[serde(deserialize_with = "u64_deserialize")]
@ -512,7 +370,7 @@ where
.map_err(D::Error::custom) .map_err(D::Error::custom)
} }
pub fn u64_deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error> fn u64_deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
where where
D: serde::Deserializer<'de>, D: serde::Deserializer<'de>,
{ {
@ -595,27 +453,3 @@ where
Ok(attesting_indices) Ok(attesting_indices)
} }
fn header_deserialize<'de, D>(deserializer: D) -> Result<Header, D::Error>
where
D: serde::Deserializer<'de>,
{
let header: LightClientHeader = serde::Deserialize::deserialize(deserializer)?;
Ok(match header {
LightClientHeader::Unwrapped(header) => header,
LightClientHeader::Wrapped(header) => header.beacon,
})
}
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum LightClientHeader {
Unwrapped(Header),
Wrapped(Beacon),
}
#[derive(serde::Deserialize)]
struct Beacon {
beacon: Header,
}
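
A self-contained illustration of the wrapped/unwrapped pattern that header_deserialize relies on, using a trimmed stand-in Header with a single numeric slot field (the real Header has more fields and parses its numbers from strings):

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct Header {
        slot: u64,
    }

    #[derive(Deserialize, Debug)]
    struct Beacon {
        beacon: Header,
    }

    #[derive(Deserialize, Debug)]
    #[serde(untagged)]
    enum LightClientHeader {
        Unwrapped(Header),
        Wrapped(Beacon),
    }

    fn main() {
        // Some servers return the header object directly...
        let a: LightClientHeader = serde_json::from_str(r#"{ "slot": 1 }"#).unwrap();
        // ...others nest it under a "beacon" key; both shapes deserialize.
        let b: LightClientHeader =
            serde_json::from_str(r#"{ "beacon": { "slot": 1 } }"#).unwrap();
        assert!(matches!(a, LightClientHeader::Unwrapped(_)));
        assert!(matches!(b, LightClientHeader::Wrapped(_)));
    }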

View File

@ -1,6 +1,9 @@
use blst::{
min_pk::{PublicKey, Signature},
BLST_ERROR,
};
use common::{types::Bytes32, utils::bytes32_to_node}; use common::{types::Bytes32, utils::bytes32_to_node};
use eyre::Result; use eyre::Result;
use milagro_bls::{AggregateSignature, PublicKey};
use ssz_rs::prelude::*; use ssz_rs::prelude::*;
use crate::types::{Header, SignatureBytes}; use crate::types::{Header, SignatureBytes};
@ -11,9 +14,10 @@ pub fn calc_sync_period(slot: u64) -> u64 {
} }
pub fn is_aggregate_valid(sig_bytes: &SignatureBytes, msg: &[u8], pks: &[&PublicKey]) -> bool { pub fn is_aggregate_valid(sig_bytes: &SignatureBytes, msg: &[u8], pks: &[&PublicKey]) -> bool {
let sig_res = AggregateSignature::from_bytes(sig_bytes); let dst: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
let sig_res = Signature::from_bytes(sig_bytes);
match sig_res { match sig_res {
Ok(sig) => sig.fast_aggregate_verify(msg, pks), Ok(sig) => sig.fast_aggregate_verify(true, msg, dst, pks) == BLST_ERROR::BLST_SUCCESS,
Err(_) => false, Err(_) => false,
} }
} }
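
A small hedged sketch of the helper's failure behavior on either backend: signature bytes that do not decode to a valid point simply yield false, so callers never have to handle a panic mid-verification. The import paths are assumptions, and an all-zero byte string is used because it is not a valid compressed signature encoding.

    use consensus::utils::is_aggregate_valid;
    use ssz_rs::Vector;

    fn main() {
        // 96 zero bytes: decodes in neither milagro_bls nor blst, so the helper
        // falls through to the Err arm and returns false.
        let bogus_sig: Vector<u8, 96> = Vector::default();
        let valid = is_aggregate_valid(&bogus_sig, b"message", &[]);
        assert!(!valid);
    }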

View File

@ -38,5 +38,5 @@ async fn test_get_payload() {
client.sync().await.unwrap(); client.sync().await.unwrap();
let payload = client.get_execution_payload(&None).await.unwrap(); let payload = client.get_execution_payload(&None).await.unwrap();
assert_eq!(*payload.block_number(), 7530932); assert_eq!(payload.block_number, 7530932);
} }

View File

@ -1,93 +0,0 @@
#![allow(deprecated)]
use env_logger::Env;
use ethers::prelude::*;
use std::{path::PathBuf, sync::Arc};
use helios::{
client::{Client, ClientBuilder, FileDB},
config::networks::Network,
types::{BlockTag, CallOpts},
};
// Generate the type-safe contract bindings with an ABI
abigen!(
Renderer,
r#"[
function renderBroker(uint256) external view returns (string memory)
function renderBroker(uint256, uint256) external view returns (string memory)
]"#,
event_derives(serde::Deserialize, serde::Serialize)
);
#[tokio::main]
async fn main() -> eyre::Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("debug")).init();
// Load the rpc url using the `MAINNET_RPC_URL` environment variable
let eth_rpc_url = std::env::var("MAINNET_RPC_URL")?;
let consensus_rpc = "https://www.lightclientdata.org";
log::info!("Consensus RPC URL: {}", consensus_rpc);
// Construct the client
let data_dir = PathBuf::from("/tmp/helios");
let mut client: Client<FileDB> = ClientBuilder::new()
.network(Network::MAINNET)
.data_dir(data_dir)
.consensus_rpc(consensus_rpc)
.execution_rpc(&eth_rpc_url)
.load_external_fallback()
.build()?;
log::info!(
"[\"{}\"] Client built with external checkpoint fallbacks",
Network::MAINNET
);
// Start the client
client.start().await?;
// Call the erroneous account method
// The expected asset is: https://0x8bb9a8baeec177ae55ac410c429cbbbbb9198cac.w3eth.io/renderBroker/5
// Retrieved by calling `renderBroker(5)` on the contract: https://etherscan.io/address/0x8bb9a8baeec177ae55ac410c429cbbbbb9198cac#code
let account = "0x8bb9a8baeec177ae55ac410c429cbbbbb9198cac";
let method = "renderBroker(uint256)";
let method2 = "renderBroker(uint256, uint256)";
let argument = U256::from(5);
let address = account.parse::<Address>()?;
let block = BlockTag::Latest;
let provider = Provider::<Http>::try_from(eth_rpc_url)?;
let render = Renderer::new(address, Arc::new(provider.clone()));
log::debug!("Context: call @ {account}::{method} <{argument}>");
// Call using abigen
let result = render.render_broker_0(argument).call().await?;
log::info!(
"[ABIGEN] {account}::{method} -> Response Length: {:?}",
result.len()
);
let render = Renderer::new(address, Arc::new(provider.clone()));
let result = render
.render_broker_1(argument, U256::from(10))
.call()
.await?;
log::info!(
"[ABIGEN] {account}::{method2} -> Response Length: {:?}",
result.len()
);
// Call on helios client
let encoded_call = render.render_broker_0(argument).calldata().unwrap();
let call_opts = CallOpts {
from: Some("0xBE0eB53F46cd790Cd13851d5EFf43D12404d33E8".parse::<Address>()?),
to: Some(address),
gas: Some(U256::from(U64::MAX.as_u64())),
gas_price: None,
value: None,
data: Some(encoded_call.to_vec()),
};
log::debug!("Calling helios client on block: {block:?}");
let result = client.call(&call_opts, block).await?;
log::info!("[HELIOS] {account}::{method} ->{:?}", result.len());
Ok(())
}

View File

@ -35,7 +35,7 @@ async fn main() -> Result<()> {
builder = builder.load_external_fallback(); builder = builder.load_external_fallback();
// Build the client // Build the client
let _client: Client<FileDB> = builder.build().unwrap(); let _client = builder.build().unwrap();
println!("Constructed client!"); println!("Constructed client!");
Ok(()) Ok(())

View File

@ -1,5 +1,4 @@
use config::CliConfig; use config::CliConfig;
use dirs::home_dir;
use eyre::Result; use eyre::Result;
use helios::prelude::*; use helios::prelude::*;
@ -7,7 +6,7 @@ use helios::prelude::*;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
// Load the config from the global config file // Load the config from the global config file
let config_path = home_dir().unwrap().join(".helios/helios.toml"); let config_path = home::home_dir().unwrap().join(".helios/helios.toml");
let config = Config::from_file(&config_path, "mainnet", &CliConfig::default()); let config = Config::from_file(&config_path, "mainnet", &CliConfig::default());
println!("Constructed config: {config:#?}"); println!("Constructed config: {config:#?}");

View File

@ -1,9 +1,9 @@
use std::{path::PathBuf, str::FromStr}; use std::str::FromStr;
use env_logger::Env; use env_logger::Env;
use ethers::{types::Address, utils}; use ethers::{types::Address, utils};
use eyre::Result; use eyre::Result;
use helios::{config::networks::Network, prelude::*}; use helios::{client::ClientBuilder, config::networks::Network, types::BlockTag};
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
@ -15,14 +15,12 @@ async fn main() -> Result<()> {
let consensus_rpc = "https://www.lightclientdata.org"; let consensus_rpc = "https://www.lightclientdata.org";
log::info!("Using consensus RPC URL: {}", consensus_rpc); log::info!("Using consensus RPC URL: {}", consensus_rpc);
let mut client: Client<FileDB> = ClientBuilder::new() let mut client = ClientBuilder::new()
.network(Network::MAINNET) .network(Network::MAINNET)
.consensus_rpc(consensus_rpc) .consensus_rpc(consensus_rpc)
.execution_rpc(untrusted_rpc_url) .execution_rpc(untrusted_rpc_url)
.load_external_fallback() .load_external_fallback()
.data_dir(PathBuf::from("/tmp/helios"))
.build()?; .build()?;
log::info!( log::info!(
"Built client on network \"{}\" with external checkpoint fallbacks", "Built client on network \"{}\" with external checkpoint fallbacks",
Network::MAINNET Network::MAINNET

View File

@ -1,17 +1,18 @@
[package] [package]
name = "execution" name = "execution"
version = "0.3.0" version = "0.1.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
reqwest = { version = "0.11", features = ["json"] } reqwest = { version = "0.11", features = ["json"] }
tokio = { version = "1", features = ["full"] }
eyre = "0.6.8" eyre = "0.6.8"
serde = { version = "1.0.143", features = ["derive"] } serde = { version = "1.0.143", features = ["derive"] }
serde_json = "1.0.85" serde_json = "1.0.85"
hex = "0.4.3" hex = "0.4.3"
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "d09f55b4f8554491e3431e01af1c32347a8781cd" } ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f18ca919cc1b685b861d0fa9e2daabe89737" }
revm = { version = "2.3", default-features = false, features = ["std", "k256", "with-serde"] } ethers = "1.0.2"
ethers = "1.0.0" revm = "2.1.0"
bytes = "1.2.1" bytes = "1.2.1"
futures = "0.3.23" futures = "0.3.23"
toml = "0.5.9" toml = "0.5.9"
@ -19,10 +20,7 @@ triehash-ethereum = { git = "https://github.com/openethereum/parity-ethereum", r
async-trait = "0.1.57" async-trait = "0.1.57"
log = "0.4.17" log = "0.4.17"
thiserror = "1.0.37" thiserror = "1.0.37"
openssl = { version = "0.10", features = ["vendored"] }
common = { path = "../common" } common = { path = "../common" }
consensus = { path = "../consensus" } consensus = { path = "../consensus" }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
openssl = { version = "0.10", features = ["vendored"] }
tokio = { version = "1", features = ["full"] }

View File

@ -24,18 +24,6 @@ pub enum ExecutionError {
MissingLog(String, U256), MissingLog(String, U256),
#[error("too many logs to prove: {0}, current limit is: {1}")] #[error("too many logs to prove: {0}, current limit is: {1}")]
TooManyLogsToProve(usize, usize), TooManyLogsToProve(usize, usize),
#[error("execution rpc is for the incorect network")]
IncorrectRpcNetwork(),
#[error("Invalid base gas fee helios {0} vs rpc endpoint {1} at block {2}")]
InvalidBaseGaseFee(U256, U256, u64),
#[error("Invalid gas used ratio of helios {0} vs rpc endpoint {1} at block {2}")]
InvalidGasUsedRatio(f64, f64, u64),
#[error("Block {0} not found")]
BlockNotFoundError(u64),
#[error("Helios Execution Payload is empty")]
EmptyExecutionPayload(),
#[error("User query for block {0} but helios oldest block is {1}")]
InvalidBlockRange(u64, u64),
} }
/// Errors that can occur during evm.rs calls /// Errors that can occur during evm.rs calls

View File

@ -6,19 +6,17 @@ use std::{
}; };
use bytes::Bytes; use bytes::Bytes;
use common::{ use common::{errors::BlockNotFoundError, types::BlockTag};
errors::{BlockNotFoundError, SlotNotFoundError},
types::BlockTag,
};
use ethers::{ use ethers::{
abi::ethereum_types::BigEndianHash, abi::ethereum_types::BigEndianHash,
prelude::{Address, H160, H256, U256},
types::transaction::eip2930::AccessListItem, types::transaction::eip2930::AccessListItem,
types::{Address, H160, H256, U256},
}; };
use eyre::{Report, Result}; use eyre::{Report, Result};
use futures::{executor::block_on, future::join_all}; use futures::future::join_all;
use log::trace; use log::trace;
use revm::{AccountInfo, Bytecode, Database, Env, TransactOut, TransactTo, EVM}; use revm::{AccountInfo, Bytecode, Database, Env, TransactOut, TransactTo, EVM};
use tokio::runtime::Runtime;
use consensus::types::ExecutionPayload; use consensus::types::ExecutionPayload;
@ -62,7 +60,7 @@ impl<'a, R: ExecutionRpc> Evm<'a, R> {
TransactOut::Call(bytes) => Err(EvmError::Revert(Some(bytes))), TransactOut::Call(bytes) => Err(EvmError::Revert(Some(bytes))),
_ => Err(EvmError::Revert(None)), _ => Err(EvmError::Revert(None)),
}, },
revm::Return::Return | revm::Return::Stop => { revm::Return::Return => {
if let Some(err) = &self.evm.db.as_ref().unwrap().error { if let Some(err) = &self.evm.db.as_ref().unwrap().error {
return Err(EvmError::Generic(err.clone())); return Err(EvmError::Generic(err.clone()));
} }
@ -90,7 +88,7 @@ impl<'a, R: ExecutionRpc> Evm<'a, R> {
TransactOut::Call(bytes) => Err(EvmError::Revert(Some(bytes))), TransactOut::Call(bytes) => Err(EvmError::Revert(Some(bytes))),
_ => Err(EvmError::Revert(None)), _ => Err(EvmError::Revert(None)),
}, },
revm::Return::Return | revm::Return::Stop => { revm::Return::Return => {
if let Some(err) = &self.evm.db.as_ref().unwrap().error { if let Some(err) = &self.evm.db.as_ref().unwrap().error {
return Err(EvmError::Generic(err.clone())); return Err(EvmError::Generic(err.clone()));
} }
@ -111,7 +109,7 @@ impl<'a, R: ExecutionRpc> Evm<'a, R> {
let rpc = db.execution.rpc.clone(); let rpc = db.execution.rpc.clone();
let payload = db.current_payload.clone(); let payload = db.current_payload.clone();
let execution = db.execution.clone(); let execution = db.execution.clone();
let block = *db.current_payload.block_number(); let block = db.current_payload.block_number;
let opts_moved = CallOpts { let opts_moved = CallOpts {
from: opts.from, from: opts.from,
@ -134,12 +132,12 @@ impl<'a, R: ExecutionRpc> Evm<'a, R> {
}; };
let to_access_entry = AccessListItem { let to_access_entry = AccessListItem {
address: opts_moved.to.unwrap_or_default(), address: opts_moved.to,
storage_keys: Vec::default(), storage_keys: Vec::default(),
}; };
let producer_account = AccessListItem { let producer_account = AccessListItem {
address: Address::from_slice(payload.fee_recipient()), address: Address::from_slice(&payload.fee_recipient),
storage_keys: Vec::default(), storage_keys: Vec::default(),
}; };
@ -175,17 +173,17 @@ impl<'a, R: ExecutionRpc> Evm<'a, R> {
let mut env = Env::default(); let mut env = Env::default();
let payload = &self.evm.db.as_ref().unwrap().current_payload; let payload = &self.evm.db.as_ref().unwrap().current_payload;
env.tx.transact_to = TransactTo::Call(opts.to.unwrap_or_default()); env.tx.transact_to = TransactTo::Call(opts.to);
env.tx.caller = opts.from.unwrap_or(Address::zero()); env.tx.caller = opts.from.unwrap_or(Address::zero());
env.tx.value = opts.value.unwrap_or(U256::from(0)); env.tx.value = opts.value.unwrap_or(U256::from(0));
env.tx.data = Bytes::from(opts.data.clone().unwrap_or_default()); env.tx.data = Bytes::from(opts.data.clone().unwrap_or(vec![]));
env.tx.gas_limit = opts.gas.map(|v| v.as_u64()).unwrap_or(u64::MAX); env.tx.gas_limit = opts.gas.map(|v| v.as_u64()).unwrap_or(u64::MAX);
env.tx.gas_price = opts.gas_price.unwrap_or(U256::zero()); env.tx.gas_price = opts.gas_price.unwrap_or(U256::zero());
env.block.number = U256::from(*payload.block_number()); env.block.number = U256::from(payload.block_number);
env.block.coinbase = Address::from_slice(payload.fee_recipient()); env.block.coinbase = Address::from_slice(&payload.fee_recipient);
env.block.timestamp = U256::from(*payload.timestamp()); env.block.timestamp = U256::from(payload.timestamp);
env.block.difficulty = U256::from_little_endian(payload.prev_randao()); env.block.difficulty = U256::from_little_endian(&payload.prev_randao);
env.cfg.chain_id = self.chain_id.into(); env.cfg.chain_id = self.chain_id.into();
@ -227,7 +225,8 @@ impl<'a, R: ExecutionRpc> ProofDB<'a, R> {
let handle = thread::spawn(move || { let handle = thread::spawn(move || {
let account_fut = execution.get_account(&address, Some(&slots), &payload); let account_fut = execution.get_account(&address, Some(&slots), &payload);
block_on(account_fut) let runtime = Runtime::new()?;
runtime.block_on(account_fut)
}); });
handle.join().unwrap() handle.join().unwrap()
@ -243,7 +242,7 @@ impl<'a, R: ExecutionRpc> Database for ProofDB<'a, R> {
} }
trace!( trace!(
"fetch basic evm state for address=0x{}", "fetch basic evm state for addess=0x{}",
hex::encode(address.as_bytes()) hex::encode(address.as_bytes())
); );
@ -266,11 +265,15 @@ impl<'a, R: ExecutionRpc> Database for ProofDB<'a, R> {
.payloads .payloads
.get(&number) .get(&number)
.ok_or(BlockNotFoundError::new(BlockTag::Number(number)))?; .ok_or(BlockNotFoundError::new(BlockTag::Number(number)))?;
Ok(H256::from_slice(payload.block_hash())) Ok(H256::from_slice(&payload.block_hash))
} }
fn storage(&mut self, address: H160, slot: U256) -> Result<U256, Report> { fn storage(&mut self, address: H160, slot: U256) -> Result<U256, Report> {
trace!("fetch evm state for address={:?}, slot={}", address, slot); trace!(
"fetch evm state for address=0x{}, slot={}",
hex::encode(address.as_bytes()),
slot
);
let slot = H256::from_uint(&slot); let slot = H256::from_uint(&slot);
@ -281,13 +284,13 @@ impl<'a, R: ExecutionRpc> Database for ProofDB<'a, R> {
.get_account(address, &[slot])? .get_account(address, &[slot])?
.slots .slots
.get(&slot) .get(&slot)
.ok_or(SlotNotFoundError::new(slot))?, .unwrap(),
}, },
None => *self None => *self
.get_account(address, &[slot])? .get_account(address, &[slot])?
.slots .slots
.get(&slot) .get(&slot)
.ok_or(SlotNotFoundError::new(slot))?, .unwrap(),
}) })
} }
@ -300,61 +303,3 @@ fn is_precompile(address: &Address) -> bool {
address.le(&Address::from_str("0x0000000000000000000000000000000000000009").unwrap()) address.le(&Address::from_str("0x0000000000000000000000000000000000000009").unwrap())
&& address.gt(&Address::zero()) && address.gt(&Address::zero())
} }
#[cfg(test)]
mod tests {
use common::utils::hex_str_to_bytes;
use consensus::types::ExecutionPayloadBellatrix;
use ssz_rs::Vector;
use crate::rpc::mock_rpc::MockRpc;
use super::*;
fn get_client() -> ExecutionClient<MockRpc> {
ExecutionClient::new("testdata/").unwrap()
}
#[test]
fn test_proof_db() {
// Construct proofdb params
let execution = get_client();
let address = Address::from_str("14f9D4aF749609c1438528C0Cce1cC3f6D411c47").unwrap();
let payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix {
state_root: Vector::from_iter(
hex_str_to_bytes(
"0xaa02f5db2ee75e3da400d10f3c30e894b6016ce8a2501680380a907b6674ce0d",
)
.unwrap(),
),
..ExecutionPayloadBellatrix::default()
});
let mut payloads = BTreeMap::new();
payloads.insert(7530933, payload.clone());
// Construct the proof database with the given client and payloads
let mut proof_db = ProofDB::new(Arc::new(execution), &payload, &payloads);
// Set the proof db accounts
let slot = U256::from(1337);
let mut accounts = HashMap::new();
let account = Account {
balance: U256::from(100),
code: hex_str_to_bytes("0x").unwrap(),
..Default::default()
};
accounts.insert(address, account);
proof_db.set_accounts(accounts);
// Get the account from the proof database
let storage_proof = proof_db.storage(address, slot);
// Check that the storage proof correctly returns a slot not found error
let expected_err: eyre::Report = SlotNotFoundError::new(H256::from_uint(&slot)).into();
assert_eq!(
expected_err.to_string(),
storage_proof.unwrap_err().to_string()
);
}
}
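A minimal, self-contained sketch (not helios code, assuming only the futures crate) of the blocking bridge used in the ProofDB hunk above, in its block_on variant: revm's Database trait is synchronous, so the async account fetch is driven to completion on a dedicated thread instead of blocking an async runtime's worker thread.

use futures::executor::block_on;
use std::thread;

// stand-in for execution.get_account(&address, Some(&slots), &payload)
async fn fetch_nonce() -> u64 {
    7
}

// synchronous wrapper in the spirit of ProofDB::basic above
fn fetch_nonce_blocking() -> u64 {
    thread::spawn(|| block_on(fetch_nonce()))
        .join()
        .expect("worker thread panicked")
}

fn main() {
    assert_eq!(fetch_nonce_blocking(), 7);
}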

View File

@ -3,7 +3,7 @@ use std::str::FromStr;
use ethers::abi::AbiEncode; use ethers::abi::AbiEncode;
use ethers::prelude::{Address, U256}; use ethers::prelude::{Address, U256};
use ethers::types::{FeeHistory, Filter, Log, Transaction, TransactionReceipt, H256}; use ethers::types::{Filter, Log, Transaction, TransactionReceipt, H256};
use ethers::utils::keccak256; use ethers::utils::keccak256;
use ethers::utils::rlp::{encode, Encodable, RlpStream}; use ethers::utils::rlp::{encode, Encodable, RlpStream};
use eyre::Result; use eyre::Result;
@ -32,18 +32,10 @@ pub struct ExecutionClient<R: ExecutionRpc> {
impl<R: ExecutionRpc> ExecutionClient<R> { impl<R: ExecutionRpc> ExecutionClient<R> {
pub fn new(rpc: &str) -> Result<Self> { pub fn new(rpc: &str) -> Result<Self> {
let rpc: R = ExecutionRpc::new(rpc)?; let rpc = ExecutionRpc::new(rpc)?;
Ok(ExecutionClient { rpc }) Ok(ExecutionClient { rpc })
} }
pub async fn check_rpc(&self, chain_id: u64) -> Result<()> {
if self.rpc.chain_id().await? != chain_id {
Err(ExecutionError::IncorrectRpcNetwork().into())
} else {
Ok(())
}
}
pub async fn get_account( pub async fn get_account(
&self, &self,
address: &Address, address: &Address,
@ -54,7 +46,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let proof = self let proof = self
.rpc .rpc
.get_proof(address, slots, *payload.block_number()) .get_proof(address, slots, payload.block_number)
.await?; .await?;
let account_path = keccak256(address.as_bytes()).to_vec(); let account_path = keccak256(address.as_bytes()).to_vec();
@ -62,7 +54,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let is_valid = verify_proof( let is_valid = verify_proof(
&proof.account_proof, &proof.account_proof,
payload.state_root(), &payload.state_root,
&account_path, &account_path,
&account_encoded, &account_encoded,
); );
@ -98,7 +90,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let code = if proof.code_hash == KECCAK_EMPTY { let code = if proof.code_hash == KECCAK_EMPTY {
Vec::new() Vec::new()
} else { } else {
let code = self.rpc.get_code(address, *payload.block_number()).await?; let code = self.rpc.get_code(address, payload.block_number).await?;
let code_hash = keccak256(&code).into(); let code_hash = keccak256(&code).into();
if proof.code_hash != code_hash { if proof.code_hash != code_hash {
@ -136,7 +128,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let empty_uncle_hash = "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"; let empty_uncle_hash = "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347";
let tx_hashes = payload let tx_hashes = payload
.transactions() .transactions
.iter() .iter()
.map(|tx| H256::from_slice(&keccak256(tx))) .map(|tx| H256::from_slice(&keccak256(tx)))
.collect::<Vec<H256>>(); .collect::<Vec<H256>>();
@ -144,7 +136,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let txs = if full_tx { let txs = if full_tx {
let txs_fut = tx_hashes.iter().map(|hash| async move { let txs_fut = tx_hashes.iter().map(|hash| async move {
let mut payloads = BTreeMap::new(); let mut payloads = BTreeMap::new();
payloads.insert(*payload.block_number(), payload.clone()); payloads.insert(payload.block_number, payload.clone());
let tx = self let tx = self
.get_transaction(hash, &payloads) .get_transaction(hash, &payloads)
.await? .await?
@ -163,22 +155,22 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
}; };
Ok(ExecutionBlock { Ok(ExecutionBlock {
number: *payload.block_number(), number: payload.block_number,
base_fee_per_gas: U256::from_little_endian(&payload.base_fee_per_gas().to_bytes_le()), base_fee_per_gas: U256::from_little_endian(&payload.base_fee_per_gas.to_bytes_le()),
difficulty: U256::from(0), difficulty: U256::from(0),
extra_data: payload.extra_data().to_vec(), extra_data: payload.extra_data.to_vec(),
gas_limit: *payload.gas_limit(), gas_limit: payload.gas_limit,
gas_used: *payload.gas_used(), gas_used: payload.gas_used,
hash: H256::from_slice(payload.block_hash()), hash: H256::from_slice(&payload.block_hash),
logs_bloom: payload.logs_bloom().to_vec(), logs_bloom: payload.logs_bloom.to_vec(),
miner: Address::from_slice(payload.fee_recipient()), miner: Address::from_slice(&payload.fee_recipient),
parent_hash: H256::from_slice(payload.parent_hash()), parent_hash: H256::from_slice(&payload.parent_hash),
receipts_root: H256::from_slice(payload.receipts_root()), receipts_root: H256::from_slice(&payload.receipts_root),
state_root: H256::from_slice(payload.state_root()), state_root: H256::from_slice(&payload.state_root),
timestamp: *payload.timestamp(), timestamp: payload.timestamp,
total_difficulty: 0, total_difficulty: 0,
transactions: txs, transactions: txs,
mix_hash: H256::from_slice(payload.prev_randao()), mix_hash: H256::from_slice(&payload.prev_randao),
nonce: empty_nonce, nonce: empty_nonce,
sha3_uncles: H256::from_str(empty_uncle_hash)?, sha3_uncles: H256::from_str(empty_uncle_hash)?,
size: 0, size: 0,
@ -187,21 +179,6 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
}) })
} }
pub async fn get_transaction_by_block_hash_and_index(
&self,
payload: &ExecutionPayload,
index: usize,
) -> Result<Option<Transaction>> {
let tx = payload.transactions()[index].clone();
let tx_hash = H256::from_slice(&keccak256(tx));
let mut payloads = BTreeMap::new();
payloads.insert(*payload.block_number(), payload.clone());
let tx_option = self.get_transaction(&tx_hash, &payloads).await?;
let tx = tx_option.ok_or(eyre::eyre!("not reachable"))?;
Ok(Some(tx))
}
pub async fn get_transaction_receipt( pub async fn get_transaction_receipt(
&self, &self,
tx_hash: &H256, tx_hash: &H256,
@ -222,7 +199,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let payload = payload.unwrap(); let payload = payload.unwrap();
let tx_hashes = payload let tx_hashes = payload
.transactions() .transactions
.iter() .iter()
.map(|tx| H256::from_slice(&keccak256(tx))) .map(|tx| H256::from_slice(&keccak256(tx)))
.collect::<Vec<H256>>(); .collect::<Vec<H256>>();
@ -239,7 +216,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let expected_receipt_root = ordered_trie_root(receipts_encoded); let expected_receipt_root = ordered_trie_root(receipts_encoded);
let expected_receipt_root = H256::from_slice(&expected_receipt_root.to_fixed_bytes()); let expected_receipt_root = H256::from_slice(&expected_receipt_root.to_fixed_bytes());
let payload_receipt_root = H256::from_slice(payload.receipts_root()); let payload_receipt_root = H256::from_slice(&payload.receipts_root);
if expected_receipt_root != payload_receipt_root || !receipts.contains(&receipt) { if expected_receipt_root != payload_receipt_root || !receipts.contains(&receipt) {
return Err(ExecutionError::ReceiptRootMismatch(tx_hash.to_string()).into()); return Err(ExecutionError::ReceiptRootMismatch(tx_hash.to_string()).into());
@ -275,7 +252,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
let tx_encoded = tx.rlp().to_vec(); let tx_encoded = tx.rlp().to_vec();
let txs_encoded = payload let txs_encoded = payload
.transactions() .transactions
.iter() .iter()
.map(|tx| tx.to_vec()) .map(|tx| tx.to_vec())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@ -283,6 +260,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
if !txs_encoded.contains(&tx_encoded) { if !txs_encoded.contains(&tx_encoded) {
return Err(ExecutionError::MissingTransaction(hash.to_string()).into()); return Err(ExecutionError::MissingTransaction(hash.to_string()).into());
} }
Ok(Some(tx)) Ok(Some(tx))
} }
@ -291,22 +269,7 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
filter: &Filter, filter: &Filter,
payloads: &BTreeMap<u64, ExecutionPayload>, payloads: &BTreeMap<u64, ExecutionPayload>,
) -> Result<Vec<Log>> { ) -> Result<Vec<Log>> {
let filter = filter.clone(); let logs = self.rpc.get_logs(filter).await?;
// avoid fetching logs for a block helios hasn't seen yet
let filter = if filter.get_to_block().is_none() && filter.get_block_hash().is_none() {
let block = *payloads.last_key_value().unwrap().0;
let filter = filter.to_block(block);
if filter.get_from_block().is_none() {
filter.from_block(block)
} else {
filter
}
} else {
filter
};
let logs = self.rpc.get_logs(&filter).await?;
if logs.len() > MAX_SUPPORTED_LOGS_NUMBER { if logs.len() > MAX_SUPPORTED_LOGS_NUMBER {
return Err( return Err(
ExecutionError::TooManyLogsToProve(logs.len(), MAX_SUPPORTED_LOGS_NUMBER).into(), ExecutionError::TooManyLogsToProve(logs.len(), MAX_SUPPORTED_LOGS_NUMBER).into(),
@ -345,120 +308,6 @@ impl<R: ExecutionRpc> ExecutionClient<R> {
} }
Ok(logs) Ok(logs)
} }
pub async fn get_fee_history(
&self,
block_count: u64,
last_block: u64,
_reward_percentiles: &[f64],
payloads: &BTreeMap<u64, ExecutionPayload>,
) -> Result<Option<FeeHistory>> {
// Extract the latest and oldest block numbers from the payloads
let helios_latest_block_number = *payloads
.last_key_value()
.ok_or(ExecutionError::EmptyExecutionPayload())?
.0;
let helios_oldest_block_number = *payloads
.first_key_value()
.ok_or(ExecutionError::EmptyExecutionPayload())?
.0;
// Case where all requested blocks are earlier than Helios' latest block number
// So helios can't prove anything in this range
if last_block < helios_oldest_block_number {
return Err(
ExecutionError::InvalidBlockRange(last_block, helios_latest_block_number).into(),
);
}
// If the requested block is more recent than helios' latest block
// we can only return up to helios' latest block
let mut request_latest_block = last_block;
if request_latest_block > helios_latest_block_number {
request_latest_block = helios_latest_block_number;
}
// Requested oldest block is further out than what helios' synced
let mut request_oldest_block = request_latest_block - block_count;
if request_oldest_block < helios_oldest_block_number {
request_oldest_block = helios_oldest_block_number;
}
// Construct a fee history
let mut fee_history = FeeHistory {
oldest_block: U256::from(request_oldest_block),
base_fee_per_gas: vec![],
gas_used_ratio: vec![],
reward: payloads.iter().map(|_| vec![]).collect::<Vec<Vec<U256>>>(),
};
for block_id in request_oldest_block..=request_latest_block {
let execution_payload = payloads
.get(&block_id)
.ok_or(ExecutionError::EmptyExecutionPayload())?;
let converted_base_fee_per_gas = ethers::types::U256::from_little_endian(
&execution_payload.base_fee_per_gas().to_bytes_le(),
);
fee_history
.base_fee_per_gas
.push(converted_base_fee_per_gas);
let gas_used_ratio_helios = ((*execution_payload.gas_used() as f64
/ *execution_payload.gas_limit() as f64)
* 10.0_f64.powi(12))
.round()
/ 10.0_f64.powi(12);
fee_history.gas_used_ratio.push(gas_used_ratio_helios);
}
Ok(Some(fee_history))
}
}
/// Verifies a fee history against an rpc.
pub async fn verify_fee_history(
rpc: &impl ExecutionRpc,
calculated_fee_history: &FeeHistory,
block_count: u64,
request_latest_block: u64,
reward_percentiles: &[f64],
) -> Result<()> {
let fee_history = rpc
.get_fee_history(block_count, request_latest_block, reward_percentiles)
.await?;
for (_pos, _base_fee_per_gas) in fee_history.base_fee_per_gas.iter().enumerate() {
// Break at last iteration
// Otherwise, this would add an additional block
if _pos == block_count as usize {
continue;
}
// Check base fee per gas
let block_to_check = (fee_history.oldest_block + _pos as u64).as_u64();
let fee_to_check = calculated_fee_history.base_fee_per_gas[_pos];
let gas_ratio_to_check = calculated_fee_history.gas_used_ratio[_pos];
if *_base_fee_per_gas != fee_to_check {
return Err(ExecutionError::InvalidBaseGaseFee(
fee_to_check,
*_base_fee_per_gas,
block_to_check,
)
.into());
}
// Check gas used ratio
let rpc_gas_used_rounded =
(fee_history.gas_used_ratio[_pos] * 10.0_f64.powi(12)).round() / 10.0_f64.powi(12);
if gas_ratio_to_check != rpc_gas_used_rounded {
return Err(ExecutionError::InvalidGasUsedRatio(
gas_ratio_to_check,
rpc_gas_used_rounded,
block_to_check,
)
.into());
}
}
Ok(())
} }
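A minimal sketch, using only std, of the 12-decimal rounding that get_fee_history and verify_fee_history above both apply before comparing gas-used ratios, so floating-point noise does not trigger a spurious InvalidGasUsedRatio error.

fn rounded_gas_used_ratio(gas_used: u64, gas_limit: u64) -> f64 {
    // same rounding as gas_used_ratio_helios and rpc_gas_used_rounded above
    ((gas_used as f64 / gas_limit as f64) * 10f64.powi(12)).round() / 10f64.powi(12)
}

fn main() {
    // e.g. 12_000_000 gas used in a 30_000_000 gas block gives a ratio of 0.4
    assert!((rounded_gas_used_ratio(12_000_000, 30_000_000) - 0.4).abs() < 1e-12);
}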
fn encode_receipt(receipt: &TransactionReceipt) -> Vec<u8> { fn encode_receipt(receipt: &TransactionReceipt) -> Vec<u8> {

View File

@ -1,18 +1,18 @@
use std::str::FromStr; use std::str::FromStr;
use async_trait::async_trait; use async_trait::async_trait;
use common::errors::RpcError;
use ethers::prelude::{Address, Http}; use ethers::prelude::{Address, Http};
use ethers::providers::{HttpRateLimitRetryPolicy, Middleware, Provider, RetryClient}; use ethers::providers::{HttpRateLimitRetryPolicy, Middleware, Provider, RetryClient};
use ethers::types::transaction::eip2718::TypedTransaction; use ethers::types::transaction::eip2718::TypedTransaction;
use ethers::types::transaction::eip2930::AccessList; use ethers::types::transaction::eip2930::AccessList;
use ethers::types::{ use ethers::types::{
BlockId, BlockNumber, Bytes, EIP1186ProofResponse, Eip1559TransactionRequest, FeeHistory, BlockId, Bytes, EIP1186ProofResponse, Eip1559TransactionRequest, Filter, Log, Transaction,
Filter, Log, Transaction, TransactionReceipt, H256, U256, TransactionReceipt, H256, U256,
}; };
use eyre::Result; use eyre::Result;
use crate::types::CallOpts; use crate::types::CallOpts;
use common::errors::RpcError;
use super::ExecutionRpc; use super::ExecutionRpc;
@ -27,16 +27,13 @@ impl Clone for HttpRpc {
} }
} }
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
impl ExecutionRpc for HttpRpc { impl ExecutionRpc for HttpRpc {
fn new(rpc: &str) -> Result<Self> { fn new(rpc: &str) -> Result<Self> {
let http = Http::from_str(rpc)?; let http = Http::from_str(rpc)?;
let mut client = RetryClient::new(http, Box::new(HttpRateLimitRetryPolicy), 100, 50); let mut client = RetryClient::new(http, Box::new(HttpRateLimitRetryPolicy), 100, 50);
client.set_compute_units(300); client.set_compute_units(300);
let provider = Provider::new(client); let provider = Provider::new(client);
Ok(HttpRpc { Ok(HttpRpc {
url: rpc.to_string(), url: rpc.to_string(),
provider, provider,
@ -63,7 +60,7 @@ impl ExecutionRpc for HttpRpc {
let block = Some(BlockId::from(block)); let block = Some(BlockId::from(block));
let mut raw_tx = Eip1559TransactionRequest::new(); let mut raw_tx = Eip1559TransactionRequest::new();
raw_tx.to = Some(opts.to.unwrap_or_default().into()); raw_tx.to = Some(opts.to.into());
raw_tx.from = opts.from; raw_tx.from = opts.from;
raw_tx.value = opts.value; raw_tx.value = opts.value;
raw_tx.gas = Some(opts.gas.unwrap_or(U256::from(100_000_000))); raw_tx.gas = Some(opts.gas.unwrap_or(U256::from(100_000_000)));
@ -131,27 +128,4 @@ impl ExecutionRpc for HttpRpc {
.await .await
.map_err(|e| RpcError::new("get_logs", e))?) .map_err(|e| RpcError::new("get_logs", e))?)
} }
async fn chain_id(&self) -> Result<u64> {
Ok(self
.provider
.get_chainid()
.await
.map_err(|e| RpcError::new("chain_id", e))?
.as_u64())
}
async fn get_fee_history(
&self,
block_count: u64,
last_block: u64,
reward_percentiles: &[f64],
) -> Result<FeeHistory> {
let block = BlockNumber::from(last_block);
Ok(self
.provider
.fee_history(block_count, block, reward_percentiles)
.await
.map_err(|e| RpcError::new("fee_history", e))?)
}
} }

View File

@ -3,8 +3,8 @@ use std::{fs::read_to_string, path::PathBuf};
use async_trait::async_trait; use async_trait::async_trait;
use common::utils::hex_str_to_bytes; use common::utils::hex_str_to_bytes;
use ethers::types::{ use ethers::types::{
transaction::eip2930::AccessList, Address, EIP1186ProofResponse, FeeHistory, Filter, Log, transaction::eip2930::AccessList, Address, EIP1186ProofResponse, Filter, Log, Transaction,
Transaction, TransactionReceipt, H256, TransactionReceipt, H256,
}; };
use eyre::{eyre, Result}; use eyre::{eyre, Result};
@ -17,8 +17,7 @@ pub struct MockRpc {
path: PathBuf, path: PathBuf,
} }
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
impl ExecutionRpc for MockRpc { impl ExecutionRpc for MockRpc {
fn new(rpc: &str) -> Result<Self> { fn new(rpc: &str) -> Result<Self> {
let path = PathBuf::from(rpc); let path = PathBuf::from(rpc);
@ -62,18 +61,4 @@ impl ExecutionRpc for MockRpc {
let logs = read_to_string(self.path.join("logs.json"))?; let logs = read_to_string(self.path.join("logs.json"))?;
Ok(serde_json::from_str(&logs)?) Ok(serde_json::from_str(&logs)?)
} }
async fn chain_id(&self) -> Result<u64> {
Err(eyre!("not implemented"))
}
async fn get_fee_history(
&self,
_block_count: u64,
_last_block: u64,
_reward_percentiles: &[f64],
) -> Result<FeeHistory> {
let fee_history = read_to_string(self.path.join("fee_history.json"))?;
Ok(serde_json::from_str(&fee_history)?)
}
} }

View File

@ -1,7 +1,7 @@
use async_trait::async_trait; use async_trait::async_trait;
use ethers::types::{ use ethers::types::{
transaction::eip2930::AccessList, Address, EIP1186ProofResponse, FeeHistory, Filter, Log, transaction::eip2930::AccessList, Address, EIP1186ProofResponse, Filter, Log, Transaction,
Transaction, TransactionReceipt, H256, TransactionReceipt, H256,
}; };
use eyre::Result; use eyre::Result;
@ -10,8 +10,7 @@ use crate::types::CallOpts;
pub mod http_rpc; pub mod http_rpc;
pub mod mock_rpc; pub mod mock_rpc;
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[async_trait]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
pub trait ExecutionRpc: Send + Clone + Sync + 'static { pub trait ExecutionRpc: Send + Clone + Sync + 'static {
fn new(rpc: &str) -> Result<Self> fn new(rpc: &str) -> Result<Self>
where where
@ -30,11 +29,4 @@ pub trait ExecutionRpc: Send + Clone + Sync + 'static {
async fn get_transaction_receipt(&self, tx_hash: &H256) -> Result<Option<TransactionReceipt>>; async fn get_transaction_receipt(&self, tx_hash: &H256) -> Result<Option<TransactionReceipt>>;
async fn get_transaction(&self, tx_hash: &H256) -> Result<Option<Transaction>>; async fn get_transaction(&self, tx_hash: &H256) -> Result<Option<Transaction>>;
async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>>; async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>>;
async fn chain_id(&self) -> Result<u64>;
async fn get_fee_history(
&self,
block_count: u64,
last_block: u64,
reward_percentiles: &[f64],
) -> Result<FeeHistory>;
} }
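A short sketch of the target-dependent async_trait pattern shown above, using a hypothetical one-method trait: on native targets the returned futures are Send, while on wasm32 the ?Send form is needed because browser futures are single-threaded.

use async_trait::async_trait;

#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
pub trait ChainIdProvider {
    // hypothetical method, mirroring ExecutionRpc::chain_id above
    async fn chain_id(&self) -> u64;
}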

View File

@ -19,7 +19,7 @@ pub struct Account {
pub slots: HashMap<H256, U256>, pub slots: HashMap<H256, U256>,
} }
#[derive(Deserialize, Serialize, Debug, Clone)] #[derive(Deserialize, Serialize, Debug)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct ExecutionBlock { pub struct ExecutionBlock {
#[serde(serialize_with = "serialize_u64_string")] #[serde(serialize_with = "serialize_u64_string")]
@ -54,17 +54,17 @@ pub struct ExecutionBlock {
pub uncles: Vec<H256>, pub uncles: Vec<H256>,
} }
#[derive(Deserialize, Serialize, Debug, Clone)] #[derive(Deserialize, Serialize, Debug)]
pub enum Transactions { pub enum Transactions {
Hashes(Vec<H256>), Hashes(Vec<H256>),
Full(Vec<Transaction>), Full(Vec<Transaction>),
} }
#[derive(Deserialize, Serialize, Clone)] #[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct CallOpts { pub struct CallOpts {
pub from: Option<Address>, pub from: Option<Address>,
pub to: Option<Address>, pub to: Address,
pub gas: Option<U256>, pub gas: Option<U256>,
pub gas_price: Option<U256>, pub gas_price: Option<U256>,
pub value: Option<U256>, pub value: Option<U256>,
@ -90,7 +90,7 @@ where
let bytes: Option<String> = serde::Deserialize::deserialize(deserializer)?; let bytes: Option<String> = serde::Deserialize::deserialize(deserializer)?;
match bytes { match bytes {
Some(bytes) => { Some(bytes) => {
let bytes = hex::decode(bytes.strip_prefix("0x").unwrap_or("")).unwrap_or_default(); let bytes = hex::decode(bytes.strip_prefix("0x").unwrap()).unwrap();
Ok(Some(bytes.to_vec())) Ok(Some(bytes.to_vec()))
} }
None => Ok(None), None => Ok(None),
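A small stand-alone sketch (assuming the hex crate, as used elsewhere in the repo) of the defensive byte decoding shown above: a missing 0x prefix or malformed hex yields an empty payload instead of panicking.

fn decode_call_data(bytes: &str) -> Vec<u8> {
    // mirrors the unwrap_or("") / unwrap_or_default() path above
    hex::decode(bytes.strip_prefix("0x").unwrap_or("")).unwrap_or_default()
}

fn main() {
    assert_eq!(decode_call_data("0x01ff"), vec![0x01u8, 0xff]);
    assert!(decode_call_data("not-hex").is_empty());
}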

View File

@ -1,35 +0,0 @@
{
"id": "1",
"jsonrpc": "2.0",
"result": {
"oldestBlock": 10762137,
"reward": [
[
"0x4a817c7ee",
"0x4a817c7ee"
], [
"0x773593f0",
"0x773593f5"
], [
"0x0",
"0x0"
], [
"0x773593f5",
"0x773bae75"
]
],
"baseFeePerGas": [
"0x12",
"0x10",
"0x10",
"0xe",
"0xd"
],
"gasUsedRatio": [
0.026089875,
0.406803,
0,
0.0866665
]
}
}

View File

@ -5,7 +5,7 @@ use ethers::types::{Address, Filter, H256, U256};
use ssz_rs::{List, Vector}; use ssz_rs::{List, Vector};
use common::utils::hex_str_to_bytes; use common::utils::hex_str_to_bytes;
use consensus::types::{ExecutionPayload, ExecutionPayloadBellatrix}; use consensus::types::ExecutionPayload;
use execution::rpc::mock_rpc::MockRpc; use execution::rpc::mock_rpc::MockRpc;
use execution::ExecutionClient; use execution::ExecutionClient;
@ -18,13 +18,13 @@ async fn test_get_account() {
let execution = get_client(); let execution = get_client();
let address = Address::from_str("14f9D4aF749609c1438528C0Cce1cC3f6D411c47").unwrap(); let address = Address::from_str("14f9D4aF749609c1438528C0Cce1cC3f6D411c47").unwrap();
let payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { let payload = ExecutionPayload {
state_root: Vector::from_iter( state_root: Vector::from_iter(
hex_str_to_bytes("0xaa02f5db2ee75e3da400d10f3c30e894b6016ce8a2501680380a907b6674ce0d") hex_str_to_bytes("0xaa02f5db2ee75e3da400d10f3c30e894b6016ce8a2501680380a907b6674ce0d")
.unwrap(), .unwrap(),
), ),
..ExecutionPayloadBellatrix::default() ..ExecutionPayload::default()
}); };
let account = execution let account = execution
.get_account(&address, None, &payload) .get_account(&address, None, &payload)
@ -55,7 +55,7 @@ async fn test_get_tx() {
H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap(); H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap();
let mut payload = ExecutionPayload::default(); let mut payload = ExecutionPayload::default();
payload.transactions_mut().push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap())); payload.transactions.push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap()));
let mut payloads = BTreeMap::new(); let mut payloads = BTreeMap::new();
payloads.insert(7530933, payload); payloads.insert(7530933, payload);
@ -104,15 +104,15 @@ async fn test_get_tx_not_included() {
#[tokio::test] #[tokio::test]
async fn test_get_logs() { async fn test_get_logs() {
let execution = get_client(); let execution = get_client();
let mut payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { let mut payload = ExecutionPayload {
receipts_root: Vector::from_iter( receipts_root: Vector::from_iter(
hex_str_to_bytes("dd82a78eccb333854f0c99e5632906e092d8a49c27a21c25cae12b82ec2a113f") hex_str_to_bytes("dd82a78eccb333854f0c99e5632906e092d8a49c27a21c25cae12b82ec2a113f")
.unwrap(), .unwrap(),
), ),
..ExecutionPayloadBellatrix::default() ..ExecutionPayload::default()
}); };
payload.transactions_mut().push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap())); payload.transactions.push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap()));
let mut payloads = BTreeMap::new(); let mut payloads = BTreeMap::new();
payloads.insert(7530933, payload); payloads.insert(7530933, payload);
@ -134,15 +134,15 @@ async fn test_get_receipt() {
let tx_hash = let tx_hash =
H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap(); H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap();
let mut payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { let mut payload = ExecutionPayload {
receipts_root: Vector::from_iter( receipts_root: Vector::from_iter(
hex_str_to_bytes("dd82a78eccb333854f0c99e5632906e092d8a49c27a21c25cae12b82ec2a113f") hex_str_to_bytes("dd82a78eccb333854f0c99e5632906e092d8a49c27a21c25cae12b82ec2a113f")
.unwrap(), .unwrap(),
), ),
..ExecutionPayloadBellatrix::default() ..ExecutionPayload::default()
}); };
payload.transactions_mut().push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap())); payload.transactions.push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap()));
let mut payloads = BTreeMap::new(); let mut payloads = BTreeMap::new();
payloads.insert(7530933, payload); payloads.insert(7530933, payload);
@ -163,7 +163,7 @@ async fn test_get_receipt_bad_proof() {
H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap(); H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap();
let mut payload = ExecutionPayload::default(); let mut payload = ExecutionPayload::default();
payload.transactions_mut().push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap())); payload.transactions.push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap()));
let mut payloads = BTreeMap::new(); let mut payloads = BTreeMap::new();
payloads.insert(7530933, payload); payloads.insert(7530933, payload);
@ -191,32 +191,12 @@ async fn test_get_receipt_not_included() {
#[tokio::test] #[tokio::test]
async fn test_get_block() { async fn test_get_block() {
let execution = get_client(); let execution = get_client();
let payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { let payload = ExecutionPayload {
block_number: 12345, block_number: 12345,
..ExecutionPayloadBellatrix::default() ..ExecutionPayload::default()
}); };
let block = execution.get_block(&payload, false).await.unwrap(); let block = execution.get_block(&payload, false).await.unwrap();
assert_eq!(block.number, 12345); assert_eq!(block.number, 12345);
} }
#[tokio::test]
async fn test_get_tx_by_block_hash_and_index() {
let execution = get_client();
let tx_hash =
H256::from_str("2dac1b27ab58b493f902dda8b63979a112398d747f1761c0891777c0983e591f").unwrap();
let mut payload = ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix {
block_number: 7530933,
..ExecutionPayloadBellatrix::default()
});
payload.transactions_mut().push(List::from_iter(hex_str_to_bytes("0x02f8b20583623355849502f900849502f91082ea6094326c977e6efc84e512bb9c30f76e30c160ed06fb80b844a9059cbb0000000000000000000000007daccf9b3c1ae2fa5c55f1c978aeef700bc83be0000000000000000000000000000000000000000000000001158e460913d00000c080a0e1445466b058b6f883c0222f1b1f3e2ad9bee7b5f688813d86e3fa8f93aa868ca0786d6e7f3aefa8fe73857c65c32e4884d8ba38d0ecfb947fbffb82e8ee80c167").unwrap()));
let tx = execution
.get_transaction_by_block_hash_and_index(&payload, 0)
.await
.unwrap()
.unwrap();
assert_eq!(tx.hash(), tx_hash);
}

View File

@ -1,32 +0,0 @@
[package]
name = "helios-ts"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
wasm-bindgen = "0.2.84"
wasm-bindgen-futures = "0.4.33"
serde-wasm-bindgen = "0.4.5"
console_error_panic_hook = "0.1.7"
ethers = "1.0.0"
hex = "0.4.3"
serde = { version = "1.0.143", features = ["derive"] }
serde_json = "1.0.85"
client = { path = "../client" }
common = { path = "../common" }
consensus = { path = "../consensus" }
execution = { path = "../execution" }
config = { path = "../config" }
[dependencies.web-sys]
version = "0.3"
features = [
"console",
]

View File

@ -1,24 +0,0 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8" />
<title>hello-wasm example</title>
</head>
<body>
<script src="./dist/lib.js"></script>
<script src="https://cdn.ethers.io/lib/ethers-5.2.umd.min.js"></script>
<script>
const config = {
executionRpc:"http://localhost:9001/proxy",
consensusRpc: "http://localhost:9002/proxy",
checkpoint: "0x372342db81e3a42527e08dc19e33cd4f91f440f45b9ddb0a9865d407eceb08e4",
};
helios.createHeliosProvider(config).then(heliosProvider => {
heliosProvider.sync().then(() => {
window.provider = new ethers.providers.Web3Provider(heliosProvider);
});
});
</script>
</body>
</html>

View File

@ -1,111 +0,0 @@
import init, { Client } from "./pkg/index";
export async function createHeliosProvider(config: Config): Promise<HeliosProvider> {
const wasmData = require("./pkg/index_bg.wasm");
await init(wasmData);
return new HeliosProvider(config);
}
/// An EIP-1193 compliant Ethereum provider. Treat this the same as you
/// would window.ethereum when constructing an ethers or web3 provider.
export class HeliosProvider {
#client;
#chainId;
/// Do not use this constructor. Instead use the createHeliosProvider function.
constructor(config: Config) {
const executionRpc = config.executionRpc;
const consensusRpc = config.consensusRpc;
const checkpoint = config.checkpoint;
const network = config.network ?? Network.MAINNET;
this.#client = new Client(executionRpc, consensusRpc, network, checkpoint);
this.#chainId = this.#client.chain_id();
}
async sync() {
await this.#client.sync();
}
async request(req: Request): Promise<any> {
switch(req.method) {
case "eth_getBalance": {
return this.#client.get_balance(req.params[0], req.params[1]);
};
case "eth_chainId": {
return this.#chainId;
};
case "eth_blockNumber": {
return this.#client.get_block_number();
};
case "eth_getTransactionByHash": {
let tx = await this.#client.get_transaction_by_hash(req.params[0]);
return mapToObj(tx);
};
case "eth_getTransactionCount": {
return this.#client.get_transaction_count(req.params[0], req.params[1]);
};
case "eth_getBlockTransactionCountByHash": {
return this.#client.get_block_transaction_count_by_hash(req.params[0]);
};
case "eth_getBlockTransactionCountByNumber": {
return this.#client.get_block_transaction_count_by_number(req.params[0]);
};
case "eth_getCode": {
return this.#client.get_code(req.params[0], req.params[1]);
};
case "eth_call": {
return this.#client.call(req.params[0], req.params[1]);
};
case "eth_estimateGas": {
return this.#client.estimate_gas(req.params[0]);
};
case "eth_gasPrice": {
return this.#client.gas_price();
};
case "eth_maxPriorityFeePerGas": {
return this.#client.max_priority_fee_per_gas();
};
case "eth_sendRawTransaction": {
return this.#client.send_raw_transaction(req.params[0]);
};
case "eth_getTransactionReceipt": {
return this.#client.get_transaction_receipt(req.params[0]);
};
case "eth_getLogs": {
return this.#client.get_logs(req.params[0]);
};
case "net_version": {
return this.#chainId;
};
}
}
}
export type Config = {
executionRpc: string,
consensusRpc?: string,
checkpoint?: string,
network?: Network,
}
export enum Network {
MAINNET = "mainnet",
GOERLI = "goerli",
}
type Request = {
method: string,
params: any[],
}
function mapToObj(map: Map<any, any> | undefined): Object | undefined {
if(!map) return undefined;
return Array.from(map).reduce((obj: any, [key, value]) => {
obj[key] = value;
return obj;
}, {});
}

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +0,0 @@
{
"name": "helios",
"version": "0.1.0",
"main": "./dist/lib.js",
"types": "./dist/lib.d.ts",
"scripts": {
"build": "webpack"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@wasm-tool/wasm-pack-plugin": "^1.6.0",
"ts-loader": "^9.4.1",
"typescript": "^4.9.3",
"webpack": "^5.75.0",
"webpack-cli": "^5.0.0"
},
"dependencies": {
"ethers": "^5.7.2"
}
}

View File

@ -1,7 +0,0 @@
set -e
(&>/dev/null lcp --proxyUrl https://eth-mainnet.g.alchemy.com/v2/23IavJytUwkTtBMpzt_TZKwgwAarocdT --port 9001 &)
(&>/dev/null lcp --proxyUrl https://www.lightclientdata.org --port 9002 &)
npm run build
simple-http-server

View File

@ -1,185 +0,0 @@
extern crate console_error_panic_hook;
extern crate web_sys;
use std::str::FromStr;
use common::types::BlockTag;
use ethers::types::{Address, Filter, H256};
use execution::types::CallOpts;
use wasm_bindgen::prelude::*;
use client::database::ConfigDB;
use config::{networks, Config};
#[allow(unused_macros)]
macro_rules! log {
( $( $t:tt )* ) => {
web_sys::console::log_1(&format!( $( $t )* ).into());
}
}
#[wasm_bindgen]
pub struct Client {
inner: client::Client<ConfigDB>,
chain_id: u64,
}
#[wasm_bindgen]
impl Client {
#[wasm_bindgen(constructor)]
pub fn new(
execution_rpc: String,
consensus_rpc: Option<String>,
network: String,
checkpoint: Option<String>,
) -> Self {
console_error_panic_hook::set_once();
let base = match network.as_str() {
"mainnet" => networks::mainnet(),
"goerli" => networks::goerli(),
_ => panic!("invalid network"),
};
let chain_id = base.chain.chain_id;
let checkpoint = Some(
checkpoint
.as_ref()
.map(|c| c.strip_prefix("0x").unwrap_or(c.as_str()))
.map(|c| hex::decode(c).unwrap())
.unwrap_or(base.default_checkpoint),
);
let consensus_rpc = consensus_rpc.unwrap_or(base.consensus_rpc.unwrap());
let config = Config {
execution_rpc,
consensus_rpc,
checkpoint,
chain: base.chain,
forks: base.forks,
..Default::default()
};
let inner: client::Client<ConfigDB> =
client::ClientBuilder::new().config(config).build().unwrap();
Self { inner, chain_id }
}
#[wasm_bindgen]
pub async fn sync(&mut self) {
self.inner.start().await.unwrap()
}
#[wasm_bindgen]
pub fn chain_id(&self) -> u32 {
self.chain_id as u32
}
#[wasm_bindgen]
pub async fn get_block_number(&self) -> u32 {
self.inner.get_block_number().await.unwrap() as u32
}
#[wasm_bindgen]
pub async fn get_balance(&self, addr: JsValue, block: JsValue) -> String {
let addr: Address = serde_wasm_bindgen::from_value(addr).unwrap();
let block: BlockTag = serde_wasm_bindgen::from_value(block).unwrap();
self.inner
.get_balance(&addr, block)
.await
.unwrap()
.to_string()
}
#[wasm_bindgen]
pub async fn get_transaction_by_hash(&self, hash: String) -> JsValue {
let hash = H256::from_str(&hash).unwrap();
let tx = self.inner.get_transaction_by_hash(&hash).await.unwrap();
serde_wasm_bindgen::to_value(&tx).unwrap()
}
#[wasm_bindgen]
pub async fn get_transaction_count(&self, addr: JsValue, block: JsValue) -> u32 {
let addr: Address = serde_wasm_bindgen::from_value(addr).unwrap();
let block: BlockTag = serde_wasm_bindgen::from_value(block).unwrap();
self.inner.get_nonce(&addr, block).await.unwrap() as u32
}
#[wasm_bindgen]
pub async fn get_block_transaction_count_by_hash(&self, hash: JsValue) -> u32 {
let hash: H256 = serde_wasm_bindgen::from_value(hash).unwrap();
self.inner
.get_block_transaction_count_by_hash(&hash.as_bytes().to_vec())
.await
.unwrap() as u32
}
#[wasm_bindgen]
pub async fn get_block_transaction_count_by_number(&self, block: JsValue) -> u32 {
let block: BlockTag = serde_wasm_bindgen::from_value(block).unwrap();
self.inner
.get_block_transaction_count_by_number(block)
.await
.unwrap() as u32
}
#[wasm_bindgen]
pub async fn get_code(&self, addr: JsValue, block: JsValue) -> String {
let addr: Address = serde_wasm_bindgen::from_value(addr).unwrap();
let block: BlockTag = serde_wasm_bindgen::from_value(block).unwrap();
let code = self.inner.get_code(&addr, block).await.unwrap();
format!("0x{}", hex::encode(code))
}
#[wasm_bindgen]
pub async fn call(&self, opts: JsValue, block: JsValue) -> String {
let opts: CallOpts = serde_wasm_bindgen::from_value(opts).unwrap();
let block: BlockTag = serde_wasm_bindgen::from_value(block).unwrap();
let res = self.inner.call(&opts, block).await.unwrap();
format!("0x{}", hex::encode(res))
}
#[wasm_bindgen]
pub async fn estimate_gas(&self, opts: JsValue) -> u32 {
let opts: CallOpts = serde_wasm_bindgen::from_value(opts).unwrap();
self.inner.estimate_gas(&opts).await.unwrap() as u32
}
#[wasm_bindgen]
pub async fn gas_price(&self) -> JsValue {
let price = self.inner.get_gas_price().await.unwrap();
serde_wasm_bindgen::to_value(&price).unwrap()
}
#[wasm_bindgen]
pub async fn max_priority_fee_per_gas(&self) -> JsValue {
let price = self.inner.get_priority_fee().await.unwrap();
serde_wasm_bindgen::to_value(&price).unwrap()
}
#[wasm_bindgen]
pub async fn send_raw_transaction(&self, tx: String) -> JsValue {
let tx = hex::decode(tx).unwrap();
let hash = self.inner.send_raw_transaction(&tx).await.unwrap();
serde_wasm_bindgen::to_value(&hash).unwrap()
}
#[wasm_bindgen]
pub async fn get_transaction_receipt(&self, tx: JsValue) -> JsValue {
let tx: H256 = serde_wasm_bindgen::from_value(tx).unwrap();
let receipt = self.inner.get_transaction_receipt(&tx).await.unwrap();
serde_wasm_bindgen::to_value(&receipt).unwrap()
}
#[wasm_bindgen]
pub async fn get_logs(&self, filter: JsValue) -> JsValue {
let filter: Filter = serde_wasm_bindgen::from_value(filter).unwrap();
let logs = self.inner.get_logs(&filter).await.unwrap();
serde_wasm_bindgen::to_value(&logs).unwrap()
}
}
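A sketch of the checkpoint handling in Client::new above, pulled out into a hypothetical helper (not part of the crate): an optional user-supplied hex string, with or without a 0x prefix, overrides the network's default checkpoint bytes.

fn resolve_checkpoint(user: Option<&str>, default: Vec<u8>) -> Vec<u8> {
    user.map(|c| c.strip_prefix("0x").unwrap_or(c))
        .map(|c| hex::decode(c).expect("checkpoint must be valid hex"))
        .unwrap_or(default)
}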

View File

@ -1,13 +0,0 @@
{
"compilerOptions": {
"outDir": "./dist/",
"noImplicitAny": true,
"module": "es6",
"target": "es6",
"jsx": "react",
"allowJs": true,
"moduleResolution": "node",
"sourceMap": true,
"declaration": true
}
}

View File

@ -1,40 +0,0 @@
const path = require("path");
const WasmPackPlugin = require("@wasm-tool/wasm-pack-plugin");
module.exports = {
entry: "./lib.ts",
module: {
rules: [
{
test: /\.ts?$/,
use: 'ts-loader',
exclude: /node_modules/,
},
{
test: /\.wasm$/,
type: "asset/inline",
},
],
},
resolve: {
extensions: ['.ts', '.js'],
},
output: {
filename: "lib.js",
globalObject: 'this',
path: path.resolve(__dirname, "dist"),
library: {
name: "helios",
type: "umd",
}
},
experiments: {
asyncWebAssembly: true,
},
plugins: [
new WasmPackPlugin({
extraArgs: "--target web",
crateDirectory: path.resolve(__dirname),
}),
],
};

4
rpc.md
View File

@ -22,6 +22,4 @@ Helios provides a variety of RPC methods for interacting with the Ethereum netwo
| `eth_getLogs` | `get_logs` | Returns an array of logs matching the filter. | `client.get_logs(&self, filter: Filter)` | | `eth_getLogs` | `get_logs` | Returns an array of logs matching the filter. | `client.get_logs(&self, filter: Filter)` |
| `eth_getStorageAt` | `get_storage_at` | Returns the value from a storage position at a given address. | `client.get_storage_at(&self, address: &str, slot: H256, block: BlockTag)` | | `eth_getStorageAt` | `get_storage_at` | Returns the value from a storage position at a given address. | `client.get_storage_at(&self, address: &str, slot: H256, block: BlockTag)` |
| `eth_getBlockTransactionCountByHash` | `get_block_transaction_count_by_hash` | Returns the number of transactions in a block from a block matching the given block hash. | `client.get_block_transaction_count_by_hash(&self, hash: &str)` | | `eth_getBlockTransactionCountByHash` | `get_block_transaction_count_by_hash` | Returns the number of transactions in a block from a block matching the given block hash. | `client.get_block_transaction_count_by_hash(&self, hash: &str)` |
| `eth_getBlockTransactionCountByNumber` | `get_block_transaction_count_by_number` | Returns the number of transactions in a block from a block matching the block number. | `client.get_block_transaction_count_by_number(&self, block: BlockTag)` | | `eth_getBlockTransactionCountByNumber` | `get_block_transaction_count_by_number` | Returns the number of transactions in a block from a block matching the block number. | `client.get_block_transaction_count_by_number(&self, block: BlockTag)` |
| `eth_coinbase` | `get_coinbase` | Returns the client coinbase address. | `client.get_coinbase(&self)` |
| `eth_syncing` | `syncing` | Returns an object with data about the sync status or false. | `client.syncing(&self)` |
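A hedged usage sketch for the table above, assuming an async context and an already started helios client (built with ClientBuilder as in the repo's examples); the call follows the table's signature for get_block_transaction_count_by_number.

let txs_in_latest = client
    .get_block_transaction_count_by_number(BlockTag::Latest)
    .await?;
log::info!("transactions in latest block: {}", txs_in_latest);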

View File

@ -1 +1 @@
nightly-2023-01-23 nightly

View File

@ -51,18 +51,16 @@
//! Errors used across helios. //! Errors used across helios.
pub mod client { pub mod client {
#[cfg(not(target_arch = "wasm32"))] pub use client::{database::FileDB, Client, ClientBuilder};
pub use client::database::FileDB;
pub use client::{database::ConfigDB, Client, ClientBuilder};
} }
pub mod config { pub mod config {
pub use config::{checkpoints, networks, Config}; pub use config::{networks, Config};
} }
pub mod types { pub mod types {
pub use common::types::BlockTag; pub use common::types::BlockTag;
pub use execution::types::{Account, CallOpts, ExecutionBlock, Transactions}; pub use execution::types::{CallOpts, ExecutionBlock};
} }
pub mod errors { pub mod errors {

View File

@ -1,122 +0,0 @@
use env_logger::Env;
use eyre::Result;
use helios::{config::networks::Network, prelude::*};
use std::time::Duration;
use std::{env, path::PathBuf};
#[tokio::test]
async fn feehistory() -> Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
// Client Configuration
let api_key = env::var("MAINNET_RPC_URL").expect("MAINNET_RPC_URL env variable missing");
let checkpoint = "0x4d9b87a319c52e54068b7727a93dd3d52b83f7336ed93707bcdf7b37aefce700";
let consensus_rpc = "https://www.lightclientdata.org";
let data_dir = "/tmp/helios";
log::info!("Using consensus RPC URL: {}", consensus_rpc);
// Instantiate Client
let mut client: Client<FileDB> = ClientBuilder::new()
.network(Network::MAINNET)
.consensus_rpc(consensus_rpc)
.execution_rpc(&api_key)
.checkpoint(checkpoint)
.load_external_fallback()
.data_dir(PathBuf::from(data_dir))
.build()?;
log::info!(
"Built client on \"{}\" with external checkpoint fallbacks",
Network::MAINNET
);
client.start().await?;
// Wait for syncing
std::thread::sleep(Duration::from_secs(5));
// Get inputs for fee_history calls
let head_block_num = client.get_block_number().await?;
log::info!("head_block_num: {}", &head_block_num);
let block = BlockTag::Latest;
let block_number = BlockTag::Number(head_block_num);
log::info!("block {:?} and block_number {:?}", block, block_number);
let reward_percentiles: Vec<f64> = vec![];
// Get fee history for 1 block back from latest
let fee_history = client
.get_fee_history(1, head_block_num, &reward_percentiles)
.await?
.unwrap();
assert_eq!(fee_history.base_fee_per_gas.len(), 2);
assert_eq!(fee_history.oldest_block.as_u64(), head_block_num - 1);
// Fetch 10000 delta, helios will return as many as it can
let fee_history = match client
.get_fee_history(10_000, head_block_num, &reward_percentiles)
.await?
{
Some(fee_history) => fee_history,
None => panic!(
"empty gas fee returned with inputs: Block count: {:?}, Head Block #: {:?}, Reward Percentiles: {:?}",
10_000, head_block_num, &reward_percentiles
),
};
assert!(
!fee_history.base_fee_per_gas.is_empty(),
"fee_history.base_fee_per_gas.len() {:?}",
fee_history.base_fee_per_gas.len()
);
// Fetch 10000 blocks in the past
// Helios will error since it won't have those historical blocks
let fee_history = client
.get_fee_history(1, head_block_num - 10_000, &reward_percentiles)
.await;
assert!(fee_history.is_err(), "fee_history() {fee_history:?}");
// Fetch 20 block away
// Should return array of size 21: our 20 block of interest + the next one
// The oldest block should be 19 block away, including it
let fee_history = client
.get_fee_history(20, head_block_num, &reward_percentiles)
.await?
.unwrap();
assert_eq!(
fee_history.base_fee_per_gas.len(),
21,
"fee_history.base_fee_per_gas.len() {:?} vs 21",
fee_history.base_fee_per_gas.len()
);
assert_eq!(
fee_history.oldest_block.as_u64(),
head_block_num - 20,
"fee_history.oldest_block.as_u64() {:?} vs head_block_num {:?} - 19",
fee_history.oldest_block.as_u64(),
head_block_num
);
// Fetch whatever blocks ahead, but that will fetch one block behind.
// This should return an answer of size two as Helios will cap this request to the newest block it knows
// we refresh parameters to make sure head_block_num is in line with newest block of our payload
let head_block_num = client.get_block_number().await?;
let fee_history = client
.get_fee_history(1, head_block_num + 1000, &reward_percentiles)
.await?
.unwrap();
assert_eq!(
fee_history.base_fee_per_gas.len(),
2,
"fee_history.base_fee_per_gas.len() {:?} vs 2",
fee_history.base_fee_per_gas.len()
);
assert_eq!(
fee_history.oldest_block.as_u64(),
head_block_num - 1,
"fee_history.oldest_block.as_u64() {:?} vs head_block_num {:?}",
fee_history.oldest_block.as_u64(),
head_block_num
);
Ok(())
}