chore(solc): always use sync sources reading (#1667)

Matthias Seitz 2022-11-09 00:08:41 +01:00 committed by GitHub
parent a056357b7c
commit e37a34cba6
3 changed files with 24 additions and 21 deletions

ethers-solc/Cargo.toml

@@ -69,6 +69,7 @@ harness = false
 
 [[bench]]
 name = "read_all"
+required-features = ["project-util"]
 harness = false
 
 [[test]]

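Note on the Cargo.toml change above: `required-features = ["project-util"]` means cargo only builds and runs this bench target when the `project-util` feature is enabled (the updated benchmark below imports `project_util::TempProject`), e.g. with something along the lines of `cargo bench -p ethers-solc --bench read_all --features project-util`; the exact invocation depends on the workspace setup.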
ethers-solc/benches/read_all.rs

@@ -4,7 +4,7 @@ extern crate criterion;
 
 use criterion::Criterion;
 use ethers_core::rand;
-use ethers_solc::artifacts::Source;
+use ethers_solc::{artifacts::Source, project_util::TempProject};
 use rand::{distributions::Alphanumeric, Rng};
 use std::{
     fs::File,
@@ -14,10 +14,26 @@ use std::{
 
 fn read_all_benchmark(c: &mut Criterion) {
     let root = tempfile::tempdir().unwrap();
-    let inputs = prepare_contracts(root.path(), 8);
+    let inputs = prepare_contracts(root.path(), 35);
 
     let mut group = c.benchmark_group("read many");
-    group.sample_size(10);
+    group.bench_function("sequential", |b| {
+        b.iter(|| {
+            Source::read_all(&inputs).unwrap();
+        });
+    });
+    group.bench_function("parallel", |b| {
+        b.iter(|| {
+            Source::par_read_all(&inputs).unwrap();
+        });
+    });
+}
+
+fn read_solmate(c: &mut Criterion) {
+    let prj = TempProject::checkout("transmissions11/solmate").unwrap();
+    let inputs = ethers_solc::utils::source_files(prj.sources_path());
+
+    let mut group = c.benchmark_group("read solmate");
     group.bench_function("sequential", |b| {
         b.iter(|| {
             Source::read_all(&inputs).unwrap();
@@ -40,7 +56,7 @@ fn prepare_contracts(root: &Path, num: usize) -> Vec<PathBuf> {
 
         let mut rng = rand::thread_rng();
         // let's assume a solidity file is between 2kb and 16kb
-        let n: usize = rng.gen_range(2..17);
+        let n: usize = rng.gen_range(4..17);
         let s: String = rng.sample_iter(&Alphanumeric).take(n * 1024).map(char::from).collect();
         writer.write_all(s.as_bytes()).unwrap();
         writer.flush().unwrap();
@@ -49,5 +65,5 @@ fn prepare_contracts(root: &Path, num: usize) -> Vec<PathBuf> {
     files
 }
 
-criterion_group!(benches, read_all_benchmark);
+criterion_group!(benches, read_all_benchmark, read_solmate);
 criterion_main!(benches);

ethers-solc/src/artifacts/mod.rs

@@ -1187,10 +1187,6 @@ pub struct Source {
 }
 
 impl Source {
-    /// this is a heuristically measured threshold at which we can generally expect a speedup by
-    /// using rayon's `par_iter`, See `Self::read_all_files`
-    pub const NUM_READ_PAR: usize = 8;
-
     /// Reads the file content
     pub fn read(file: impl AsRef<Path>) -> Result<Self, SolcIoError> {
         let file = file.as_ref();
@@ -1206,17 +1202,7 @@ impl Source {
     ///
     /// Depending on the len of the vec it will try to read the files in parallel
     pub fn read_all_files(files: Vec<PathBuf>) -> Result<Sources, SolcIoError> {
-        use rayon::prelude::*;
-
-        if files.len() < Self::NUM_READ_PAR {
-            Self::read_all(files)
-        } else {
-            files
-                .par_iter()
-                .map(Into::into)
-                .map(|file| Self::read(&file).map(|source| (file, source)))
-                .collect()
-        }
+        Self::read_all(files)
     }
 
     /// Reads all files
@@ -1235,7 +1221,7 @@ impl Source {
     /// Parallelized version of `Self::read_all` that reads all files using a parallel iterator
     ///
    /// NOTE: this is only expected to be faster than `Self::read_all` if the given iterator
-    /// contains at least several paths. see also `Self::read_all_files`.
+    /// contains at least several paths or the files are rather large.
    pub fn par_read_all<T, I>(files: I) -> Result<Sources, SolcIoError>
     where
         I: IntoIterator<Item = T>,
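
For context, a minimal sketch of how a caller interacts with these readers after this change; the paths are placeholders and error handling is simplified, so this is illustrative rather than part of the diff:

use std::path::PathBuf;

use ethers_solc::artifacts::Source;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder paths; a real caller would collect these from a project's
    // source directory (e.g. via ethers_solc::utils::source_files).
    let files: Vec<PathBuf> = vec!["contracts/A.sol".into(), "contracts/B.sol".into()];

    // After this commit, read_all_files always reads synchronously
    // (the NUM_READ_PAR rayon threshold is gone).
    let sources = Source::read_all_files(files.clone())?;

    // Parallel reading is opt-in and only expected to pay off for many paths
    // or rather large files.
    let parallel = Source::par_read_all(&files)?;

    assert_eq!(sources.len(), parallel.len());
    Ok(())
}

The effect is that parallelism becomes explicit at the call site via par_read_all instead of being triggered by a hidden file-count heuristic inside read_all_files.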