diff --git a/Cargo.lock b/Cargo.lock
index ec6377f8aa0..c10cb8e0500 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12231,6 +12231,7 @@ dependencies = [
 name = "starknet_committer"
 version = "0.0.0"
 dependencies = [
+ "csv",
  "ethnum",
  "hex",
  "pretty_assertions",
@@ -12288,7 +12289,6 @@ name = "starknet_committer_cli"
 version = "0.0.0"
 dependencies = [
  "clap",
- "csv",
  "rand 0.8.5",
  "starknet_committer",
  "starknet_committer_and_os_cli",
diff --git a/crates/starknet_committer/Cargo.toml b/crates/starknet_committer/Cargo.toml
index cee16317727..e918d407ee6 100644
--- a/crates/starknet_committer/Cargo.toml
+++ b/crates/starknet_committer/Cargo.toml
@@ -8,8 +8,10 @@ description = "Computes and manages Starknet state."
 
 [features]
 testing = ["starknet_patricia/testing"]
+benchmark = ["csv"]
 
 [dependencies]
+csv = { workspace = true, optional = true }
 ethnum.workspace = true
 hex.workspace = true
 pretty_assertions.workspace = true
diff --git a/crates/starknet_committer/src/block_committer.rs b/crates/starknet_committer/src/block_committer.rs
index ed46649dab1..ab72962943e 100644
--- a/crates/starknet_committer/src/block_committer.rs
+++ b/crates/starknet_committer/src/block_committer.rs
@@ -5,3 +5,5 @@ pub mod input;
 pub mod random_structs;
 #[cfg(any(feature = "testing", test))]
 pub mod state_diff_generator;
+#[cfg(feature = "benchmark")]
+pub mod timing_util;
diff --git a/crates/starknet_committer/src/block_committer/timing_util.rs b/crates/starknet_committer/src/block_committer/timing_util.rs
new file mode 100644
index 00000000000..c23168d1fa3
--- /dev/null
+++ b/crates/starknet_committer/src/block_committer/timing_util.rs
@@ -0,0 +1,144 @@
+use std::fs::{self, File};
+use std::time::Instant;
+
+use csv::Writer;
+use tracing::info;
+pub struct TimeMeasurement {
+    timer: Option<Instant>,
+    total_time: u128, // Total duration of all blocks (milliseconds).
+    per_fact_durations: Vec<u64>, // Average duration (microseconds) per new fact in a block.
+    n_facts: Vec<usize>,
+    block_durations: Vec<u64>, // Duration of a block (milliseconds).
+    facts_in_db: Vec<usize>, // Number of facts in the DB prior to the current block.
+    total_facts: usize,
+}
+
+impl TimeMeasurement {
+    pub fn new(n_iterations: usize) -> Self {
+        Self {
+            timer: None,
+            total_time: 0,
+            per_fact_durations: Vec::with_capacity(n_iterations),
+            n_facts: Vec::with_capacity(n_iterations),
+            block_durations: Vec::with_capacity(n_iterations),
+            facts_in_db: Vec::with_capacity(n_iterations),
+            total_facts: 0,
+        }
+    }
+
+    pub fn start_measurement(&mut self) {
+        self.timer = Some(Instant::now());
+    }
+
+    pub fn stop_measurement(&mut self, facts_count: usize) {
+        let duration =
+            self.timer.expect("stop_measurement called before start_measurement").elapsed();
+        info!(
+            "Time elapsed for iteration {}: {} milliseconds",
+            self.n_results(),
+            duration.as_millis()
+        );
+        let millis = duration.as_millis();
+        self.total_time += millis;
+        #[allow(clippy::as_conversions)]
+        self.per_fact_durations
+            .push(duration.div_f32(facts_count as f32).as_micros().try_into().unwrap());
+        self.block_durations.push(millis.try_into().unwrap());
+        self.n_facts.push(facts_count);
+        self.facts_in_db.push(self.total_facts);
+        self.total_facts += facts_count;
+    }
+
+    pub fn n_results(&self) -> usize {
+        self.block_durations.len()
+    }
+
+    /// Returns the average time per block (milliseconds).
+    fn block_average_time(&self) -> f64 {
+        #[allow(clippy::as_conversions)]
+        {
+            self.total_time as f64 / self.n_results() as f64
+        }
+    }
+
+    /// Returns the average time per fact over a window of `window_size` blocks (microseconds).
+    fn average_window_time(&self, window_size: usize) -> Vec<f64> {
+        let mut averages = Vec::new(); // In microseconds.
+        // Takes only the full windows, so if the last window is smaller than `window_size`, it is
+        // ignored.
+        let n_windows = self.n_results() / window_size;
+        for i in 0..n_windows {
+            let window_start = i * window_size;
+            let sum: u64 =
+                self.block_durations[window_start..window_start + window_size].iter().sum();
+            let sum_of_facts: usize =
+                self.n_facts[window_start..window_start + window_size].iter().sum();
+            #[allow(clippy::as_conversions)]
+            averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
+        }
+        averages
+    }
+
+    pub fn pretty_print(&self, window_size: usize) {
+        if self.n_results() == 0 {
+            println!("No measurements were taken.");
+            return;
+        }
+
+        println!(
+            "Total time: {} milliseconds for {} iterations.",
+            self.total_time,
+            self.n_results()
+        );
+        println!(
+            "Average block time: {:.2} milliseconds.
+            ",
+            self.block_average_time()
+        );
+
+        println!("Average time per window of {window_size} iterations:");
+        let means = self.average_window_time(window_size);
+        let max = means.iter().cloned().fold(f64::MIN, f64::max);
+        // Print a graph visualization of block times.
+        for (i, fact_duration) in means.iter().enumerate() {
+            let norm = fact_duration / max;
+            #[allow(clippy::as_conversions)]
+            let width = (norm * 40.0).round() as usize; // up to 40 characters wide
+            let bar = "█".repeat(width.max(1));
+            println!("win {i:>4}: {fact_duration:>8.4} microsecond / fact | {bar}");
+        }
+    }
+
+    pub fn to_csv(&self, path: &str, output_dir: &str) {
+        fs::create_dir_all(output_dir).expect("Failed to create output directory.");
+        let file =
+            File::create(format!("{output_dir}/{path}")).expect("Failed to create CSV file.");
+        let mut wtr = Writer::from_writer(file);
+        wtr.write_record([
+            "block_number",
+            "n_facts",
+            "facts_in_db",
+            "time_per_fact_micros",
+            "block_duration_millis",
+        ])
+        .expect("Failed to write CSV header.");
+        for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
+            .per_fact_durations
+            .iter()
+            .zip(self.n_facts.iter())
+            .zip(self.block_durations.iter())
+            .zip(self.facts_in_db.iter())
+            .enumerate()
+        {
+            wtr.write_record(&[
+                i.to_string(),
+                n_facts.to_string(),
+                facts_in_db.to_string(),
+                per_fact.to_string(),
+                duration.to_string(),
+            ])
+            .expect("Failed to write CSV record.");
+        }
+        wtr.flush().expect("Failed to flush CSV writer.");
+    }
+}
diff --git a/crates/starknet_committer_cli/Cargo.toml b/crates/starknet_committer_cli/Cargo.toml
index f5e848f46da..6d55ee8c187 100644
--- a/crates/starknet_committer_cli/Cargo.toml
+++ b/crates/starknet_committer_cli/Cargo.toml
@@ -11,9 +11,8 @@ workspace = true
 
 [dependencies]
 clap = { workspace = true, features = ["cargo", "derive"] }
-csv.workspace = true
 rand.workspace = true
-starknet_committer = { workspace = true, features = ["testing"] }
+starknet_committer = { workspace = true, features = ["benchmark", "testing"] }
 # TODO(Tzahi): Remove once tracing is moved to a common location.
 starknet_committer_and_os_cli.workspace = true
 starknet_patricia.workspace = true
diff --git a/crates/starknet_committer_cli/src/commands.rs b/crates/starknet_committer_cli/src/commands.rs
index 0e05fdb034b..f461fed844d 100644
--- a/crates/starknet_committer_cli/src/commands.rs
+++ b/crates/starknet_committer_cli/src/commands.rs
@@ -1,158 +1,15 @@
-use std::fs::{self, File};
-use std::time::Instant;
-
-use csv::Writer;
 use rand::rngs::SmallRng;
 use rand::SeedableRng;
 use starknet_committer::block_committer::commit::commit_block;
 use starknet_committer::block_committer::input::{ConfigImpl, Input};
 use starknet_committer::block_committer::state_diff_generator::generate_random_state_diff;
+use starknet_committer::block_committer::timing_util::TimeMeasurement;
 use starknet_patricia::hash::hash_trait::HashOutput;
 use starknet_patricia_storage::map_storage::MapStorage;
 use tracing::info;
 
 pub type InputImpl = Input<ConfigImpl>;
 
-struct TimeMeasurement {
-    timer: Option<Instant>,
-    total_time: u128, // Total duration of all blocks (milliseconds).
-    per_fact_durations: Vec<u64>, // Average duration (microseconds) per new fact in a block.
-    n_facts: Vec<usize>,
-    block_durations: Vec<u64>, // Duration of a block (milliseconds).
-    facts_in_db: Vec<usize>, // Number of facts in the DB prior to the current block.
-    total_facts: usize,
-}
-
-impl TimeMeasurement {
-    fn new(n_iterations: usize) -> Self {
-        Self {
-            timer: None,
-            total_time: 0,
-            per_fact_durations: Vec::with_capacity(n_iterations),
-            n_facts: Vec::with_capacity(n_iterations),
-            block_durations: Vec::with_capacity(n_iterations),
-            facts_in_db: Vec::with_capacity(n_iterations),
-            total_facts: 0,
-        }
-    }
-
-    fn start_measurement(&mut self) {
-        self.timer = Some(Instant::now());
-    }
-
-    fn stop_measurement(&mut self, facts_count: usize) {
-        let duration =
-            self.timer.expect("stop_measurement called before start_measurement").elapsed();
-        info!(
-            "Time elapsed for iteration {}: {} milliseconds",
-            self.n_results(),
-            duration.as_millis()
-        );
-        let millis = duration.as_millis();
-        self.total_time += millis;
-        #[allow(clippy::as_conversions)]
-        self.per_fact_durations
-            .push(duration.div_f32(facts_count as f32).as_micros().try_into().unwrap());
-        self.block_durations.push(millis.try_into().unwrap());
-        self.n_facts.push(facts_count);
-        self.facts_in_db.push(self.total_facts);
-        self.total_facts += facts_count;
-    }
-
-    fn n_results(&self) -> usize {
-        self.block_durations.len()
-    }
-
-    /// Returns the average time per block (milliseconds).
-    fn block_average_time(&self) -> f64 {
-        #[allow(clippy::as_conversions)]
-        {
-            self.total_time as f64 / self.n_results() as f64
-        }
-    }
-
-    /// Returns the average time per fact over a window of `window_size` blocks (microseconds).
-    fn average_window_time(&self, window_size: usize) -> Vec<f64> {
-        let mut averages = Vec::new(); // In milliseconds.
-        // Takes only the full windows, so if the last window is smaller than `window_size`, it is
-        // ignored.
-        let n_windows = self.n_results() / window_size;
-        for i in 0..n_windows {
-            let window_start = i * window_size;
-            let sum: u64 =
-                self.block_durations[window_start..window_start + window_size].iter().sum();
-            let sum_of_facts: usize =
-                self.n_facts[window_start..window_start + window_size].iter().sum();
-            #[allow(clippy::as_conversions)]
-            averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
-        }
-        averages
-    }
-
-    fn pretty_print(&self, window_size: usize) {
-        if self.n_results() == 0 {
-            println!("No measurements were taken.");
-            return;
-        }
-
-        println!(
-            "Total time: {} milliseconds for {} iterations.",
-            self.total_time,
-            self.n_results()
-        );
-        println!(
-            "Average block time: {:.2} milliseconds.
-            ",
-            self.block_average_time()
-        );
-
-        println!("Average time per window of {window_size} iterations:");
-        let means = self.average_window_time(window_size);
-        let max = means.iter().cloned().fold(f64::MIN, f64::max);
-        // Print a graph visualization of block times.
-        for (i, fact_duration) in means.iter().enumerate() {
-            let norm = fact_duration / max;
-            #[allow(clippy::as_conversions)]
-            let width = (norm * 40.0).round() as usize; // up tp 40 characters wide
-            let bar = "█".repeat(width.max(1));
-            println!("win {i:>4}: {fact_duration:>8.4} microsecond / fact | {bar}");
-        }
-    }
-
-    fn to_csv(&self, path: &str, output_dir: &str) {
-        fs::create_dir_all(output_dir).expect("Failed to create output directory.");
-        let file =
-            File::create(format!("{output_dir}/{path}")).expect("Failed to create CSV file.");
-        let mut wtr = Writer::from_writer(file);
-        wtr.write_record([
-            "block_number",
-            "n_facts",
-            "facts_in_db",
-            "time_per_fact_micros",
-            "block_duration_millis",
-        ])
-        .expect("Failed to write CSV header.");
-        for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
-            .per_fact_durations
-            .iter()
-            .zip(self.n_facts.iter())
-            .zip(self.block_durations.iter())
-            .zip(self.facts_in_db.iter())
-            .enumerate()
-        {
-            wtr.write_record(&[
-                i.to_string(),
-                n_facts.to_string(),
-                facts_in_db.to_string(),
-                per_fact.to_string(),
-                duration.to_string(),
-            ])
-            .expect("Failed to write CSV record.");
-        }
-        wtr.flush().expect("Failed to flush CSV writer.");
-    }
-}
-
 /// Runs the committer on n_iterations random generated blocks.
 /// Prints the time measurement to the console and saves statistics to a CSV file in the given
 /// output directory.