Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions crates/starknet_committer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@ description = "Computes and manages Starknet state."

[features]
testing = ["starknet_patricia/testing"]
benchmark = ["csv"]

[dependencies]
csv = { workspace = true, optional = true }
ethnum.workspace = true
hex.workspace = true
pretty_assertions.workspace = true
Expand Down
2 changes: 2 additions & 0 deletions crates/starknet_committer/src/block_committer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,5 @@ pub mod input;
pub mod random_structs;
#[cfg(any(feature = "testing", test))]
pub mod state_diff_generator;
#[cfg(feature = "benchmark")]
pub mod timing_util;
144 changes: 144 additions & 0 deletions crates/starknet_committer/src/block_committer/timing_util.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
use std::fs::{self, File};
use std::time::Instant;

use csv::Writer;
use tracing::info;
/// Collects per-block timing statistics for committer benchmark runs.
///
/// One instance accumulates measurements over a whole benchmark: call
/// `start_measurement` / `stop_measurement` around each block, then report via
/// `pretty_print` and/or `to_csv`.
#[derive(Debug)]
pub struct TimeMeasurement {
    /// Running timer for the block currently being measured; `None` between measurements.
    timer: Option<Instant>,
    /// Total duration of all blocks (milliseconds).
    total_time: u128,
    /// Average duration (microseconds) per new fact in a block, one entry per block.
    per_fact_durations: Vec<u64>,
    /// Number of new facts produced by each block.
    n_facts: Vec<usize>,
    /// Duration of each block (milliseconds).
    block_durations: Vec<u64>,
    /// Number of facts in the DB prior to the corresponding block.
    facts_in_db: Vec<usize>,
    /// Total number of facts committed so far, across all measured blocks.
    total_facts: usize,
}

impl TimeMeasurement {
    /// Creates a measurement collector, pre-sizing the per-block vectors for `n_iterations`
    /// blocks to avoid reallocation during the benchmark.
    pub fn new(n_iterations: usize) -> Self {
        Self {
            timer: None,
            total_time: 0,
            per_fact_durations: Vec::with_capacity(n_iterations),
            n_facts: Vec::with_capacity(n_iterations),
            block_durations: Vec::with_capacity(n_iterations),
            facts_in_db: Vec::with_capacity(n_iterations),
            total_facts: 0,
        }
    }

    /// Starts timing a block. Must be paired with a following `stop_measurement`.
    pub fn start_measurement(&mut self) {
        self.timer = Some(Instant::now());
    }

    /// Stops the current measurement and records the statistics of a block that produced
    /// `facts_count` new facts.
    ///
    /// # Panics
    /// Panics if called without a preceding `start_measurement` (including a second call
    /// after the timer was already consumed).
    pub fn stop_measurement(&mut self, facts_count: usize) {
        // `take()` consumes the timer, so a double `stop_measurement` fails loudly instead of
        // silently reusing a stale start time.
        let duration =
            self.timer.take().expect("stop_measurement called before start_measurement").elapsed();
        info!(
            "Time elapsed for iteration {}: {} milliseconds",
            self.n_results(),
            duration.as_millis()
        );
        let millis = duration.as_millis();
        self.total_time += millis;
        // Guard against `facts_count == 0`: `Duration::div_f32(0.0)` yields a non-finite
        // duration and panics.
        let per_fact_micros: u64 = if facts_count == 0 {
            0
        } else {
            #[allow(clippy::as_conversions)]
            let avg = duration.div_f32(facts_count as f32);
            avg.as_micros().try_into().unwrap()
        };
        self.per_fact_durations.push(per_fact_micros);
        self.block_durations.push(millis.try_into().unwrap());
        self.n_facts.push(facts_count);
        self.facts_in_db.push(self.total_facts);
        self.total_facts += facts_count;
    }

    /// Returns the number of completed measurements (i.e. blocks measured so far).
    pub fn n_results(&self) -> usize {
        self.block_durations.len()
    }

    /// Returns the average time per block (milliseconds).
    /// NOTE: returns NaN when no measurements were taken; `pretty_print` checks for that
    /// before calling.
    fn block_average_time(&self) -> f64 {
        #[allow(clippy::as_conversions)]
        {
            self.total_time as f64 / self.n_results() as f64
        }
    }

    /// Returns the average time per fact over a window of `window_size` blocks (microseconds).
    /// Returns an empty vector when `window_size == 0` (instead of panicking on division by
    /// zero).
    fn average_window_time(&self, window_size: usize) -> Vec<f64> {
        if window_size == 0 {
            return Vec::new();
        }
        // Takes only the full windows, so if the last window is smaller than `window_size`, it is
        // ignored.
        let n_windows = self.n_results() / window_size;
        let mut averages = Vec::with_capacity(n_windows);
        for i in 0..n_windows {
            let window_start = i * window_size;
            let sum: u64 =
                self.block_durations[window_start..window_start + window_size].iter().sum();
            let sum_of_facts: usize =
                self.n_facts[window_start..window_start + window_size].iter().sum();
            // Durations are in milliseconds; multiply by 1000 to report microseconds per fact.
            #[allow(clippy::as_conversions)]
            averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
        }
        averages
    }

    /// Prints a summary of the collected measurements, including a bar-graph visualization of
    /// the average per-fact time over windows of `window_size` blocks.
    pub fn pretty_print(&self, window_size: usize) {
        if self.n_results() == 0 {
            println!("No measurements were taken.");
            return;
        }

        println!(
            "Total time: {} milliseconds for {} iterations.",
            self.total_time,
            self.n_results()
        );
        // The trailing `\n` leaves a blank line before the window breakdown.
        println!("Average block time: {:.2} milliseconds.\n", self.block_average_time());

        println!("Average time per window of {window_size} iterations:");
        let means = self.average_window_time(window_size);
        let max = means.iter().cloned().fold(f64::MIN, f64::max);
        // Print a graph visualization of block times, normalized to the slowest window.
        for (i, fact_duration) in means.iter().enumerate() {
            let norm = fact_duration / max;
            #[allow(clippy::as_conversions)]
            let width = (norm * 40.0).round() as usize; // up to 40 characters wide
            let bar = "█".repeat(width.max(1));
            println!("win {i:>4}: {fact_duration:>8.4} microsecond / fact | {bar}");
        }
    }

    /// Writes the per-block statistics as CSV to `output_dir/path`, creating `output_dir` if
    /// needed.
    ///
    /// # Panics
    /// Panics on any I/O failure (benchmark-only code, so failing fast is acceptable).
    pub fn to_csv(&self, path: &str, output_dir: &str) {
        fs::create_dir_all(output_dir).expect("Failed to create output directory.");
        // `Path::join` handles separators portably, unlike string concatenation.
        let file = File::create(std::path::Path::new(output_dir).join(path))
            .expect("Failed to create CSV file.");
        let mut wtr = Writer::from_writer(file);
        wtr.write_record([
            "block_number",
            "n_facts",
            "facts_in_db",
            "time_per_fact_micros",
            "block_duration_millis",
        ])
        .expect("Failed to write CSV header.");
        // The four statistic vectors are always pushed together in `stop_measurement`, so they
        // have equal lengths and zip loses nothing.
        for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
            .per_fact_durations
            .iter()
            .zip(self.n_facts.iter())
            .zip(self.block_durations.iter())
            .zip(self.facts_in_db.iter())
            .enumerate()
        {
            wtr.write_record(&[
                i.to_string(),
                n_facts.to_string(),
                facts_in_db.to_string(),
                per_fact.to_string(),
                duration.to_string(),
            ])
            .expect("Failed to write CSV record.");
        }
        wtr.flush().expect("Failed to flush CSV writer.");
    }
}
3 changes: 1 addition & 2 deletions crates/starknet_committer_cli/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,8 @@ workspace = true

[dependencies]
clap = { workspace = true, features = ["cargo", "derive"] }
csv.workspace = true
rand.workspace = true
starknet_committer = { workspace = true, features = ["testing"] }
starknet_committer = { workspace = true, features = ["benchmark", "testing"] }
# TODO(Tzahi): Remove once tracing is moved to a common location.
starknet_committer_and_os_cli.workspace = true
starknet_patricia.workspace = true
Expand Down
145 changes: 1 addition & 144 deletions crates/starknet_committer_cli/src/commands.rs
Original file line number Diff line number Diff line change
@@ -1,158 +1,15 @@
use std::fs::{self, File};
use std::time::Instant;

use csv::Writer;
use rand::rngs::SmallRng;
use rand::SeedableRng;
use starknet_committer::block_committer::commit::commit_block;
use starknet_committer::block_committer::input::{ConfigImpl, Input};
use starknet_committer::block_committer::state_diff_generator::generate_random_state_diff;
use starknet_committer::block_committer::timing_util::TimeMeasurement;
use starknet_patricia::hash::hash_trait::HashOutput;
use starknet_patricia_storage::map_storage::MapStorage;
use tracing::info;

pub type InputImpl = Input<ConfigImpl>;

struct TimeMeasurement {
timer: Option<Instant>,
total_time: u128, // Total duration of all blocks (milliseconds).
per_fact_durations: Vec<u64>, // Average duration (microseconds) per new fact in a block.
n_facts: Vec<usize>,
block_durations: Vec<u64>, // Duration of a block (milliseconds).
facts_in_db: Vec<usize>, // Number of facts in the DB prior to the current block.
total_facts: usize,
}

impl TimeMeasurement {
fn new(n_iterations: usize) -> Self {
Self {
timer: None,
total_time: 0,
per_fact_durations: Vec::with_capacity(n_iterations),
n_facts: Vec::with_capacity(n_iterations),
block_durations: Vec::with_capacity(n_iterations),
facts_in_db: Vec::with_capacity(n_iterations),
total_facts: 0,
}
}

fn start_measurement(&mut self) {
self.timer = Some(Instant::now());
}

fn stop_measurement(&mut self, facts_count: usize) {
let duration =
self.timer.expect("stop_measurement called before start_measurement").elapsed();
info!(
"Time elapsed for iteration {}: {} milliseconds",
self.n_results(),
duration.as_millis()
);
let millis = duration.as_millis();
self.total_time += millis;
#[allow(clippy::as_conversions)]
self.per_fact_durations
.push(duration.div_f32(facts_count as f32).as_micros().try_into().unwrap());
self.block_durations.push(millis.try_into().unwrap());
self.n_facts.push(facts_count);
self.facts_in_db.push(self.total_facts);
self.total_facts += facts_count;
}

fn n_results(&self) -> usize {
self.block_durations.len()
}

/// Returns the average time per block (milliseconds).
fn block_average_time(&self) -> f64 {
#[allow(clippy::as_conversions)]
{
self.total_time as f64 / self.n_results() as f64
}
}

/// Returns the average time per fact over a window of `window_size` blocks (microseconds).
fn average_window_time(&self, window_size: usize) -> Vec<f64> {
let mut averages = Vec::new(); // In milliseconds.
// Takes only the full windows, so if the last window is smaller than `window_size`, it is
// ignored.
let n_windows = self.n_results() / window_size;
for i in 0..n_windows {
let window_start = i * window_size;
let sum: u64 =
self.block_durations[window_start..window_start + window_size].iter().sum();
let sum_of_facts: usize =
self.n_facts[window_start..window_start + window_size].iter().sum();
#[allow(clippy::as_conversions)]
averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
}
averages
}

fn pretty_print(&self, window_size: usize) {
if self.n_results() == 0 {
println!("No measurements were taken.");
return;
}

println!(
"Total time: {} milliseconds for {} iterations.",
self.total_time,
self.n_results()
);
println!(
"Average block time: {:.2} milliseconds.
",
self.block_average_time()
);

println!("Average time per window of {window_size} iterations:");
let means = self.average_window_time(window_size);
let max = means.iter().cloned().fold(f64::MIN, f64::max);
// Print a graph visualization of block times.
for (i, fact_duration) in means.iter().enumerate() {
let norm = fact_duration / max;
#[allow(clippy::as_conversions)]
let width = (norm * 40.0).round() as usize; // up tp 40 characters wide
let bar = "█".repeat(width.max(1));
println!("win {i:>4}: {fact_duration:>8.4} microsecond / fact | {bar}");
}
}

fn to_csv(&self, path: &str, output_dir: &str) {
fs::create_dir_all(output_dir).expect("Failed to create output directory.");
let file =
File::create(format!("{output_dir}/{path}")).expect("Failed to create CSV file.");
let mut wtr = Writer::from_writer(file);
wtr.write_record([
"block_number",
"n_facts",
"facts_in_db",
"time_per_fact_micros",
"block_duration_millis",
])
.expect("Failed to write CSV header.");
for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
.per_fact_durations
.iter()
.zip(self.n_facts.iter())
.zip(self.block_durations.iter())
.zip(self.facts_in_db.iter())
.enumerate()
{
wtr.write_record(&[
i.to_string(),
n_facts.to_string(),
facts_in_db.to_string(),
per_fact.to_string(),
duration.to_string(),
])
.expect("Failed to write CSV record.");
}
wtr.flush().expect("Failed to flush CSV writer.");
}
}

/// Runs the committer on n_iterations random generated blocks.
/// Prints the time measurement to the console and saves statistics to a CSV file in the given
/// output directory.
Expand Down
Loading