
Commit 456bd4f

yoavGrs authored and TzahiTaub committed
committer: add cli for committer benchmark
1 parent 6be935f commit 456bd4f

10 files changed: +314 -7 lines changed

Cargo.lock

Lines changed: 37 additions & 0 deletions

Cargo.toml

Lines changed: 2 additions & 0 deletions
@@ -81,6 +81,7 @@ members = [
     "crates/starknet_api",
     "crates/starknet_committer",
     "crates/starknet_committer_and_os_cli",
+    "crates/starknet_committer_cli",
     "crates/starknet_os",
     "crates/starknet_os_flow_tests",
     "crates/starknet_patricia",
@@ -201,6 +202,7 @@ clap = "4.5.4"
 colored = "3"
 const_format = "0.2.30"
 criterion = "0.5.1"
+csv = "1.3.1"
 dashmap = "6.1.0"
 deadqueue = "0.2.4"
 defaultmap = "0.5.0"

commitlint.config.js

Lines changed: 1 addition & 0 deletions
@@ -80,6 +80,7 @@ const AllowedScopes = ['apollo_batcher',
   'signature',
   'starknet_api',
   'starknet_committer',
+  'starknet_committer_cli',
   'starknet_committer_and_os_cli',
   'starknet_os',
   'starknet_os_flow_tests',

crates/starknet_committer/src/block_committer/input.rs

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ impl Default for ConfigImpl {
     }
 }
 
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Debug, Default, Eq, PartialEq)]
 pub struct Input<C: Config> {
     /// All relevant information for the state diff commitment.
     pub state_diff: StateDiff,

crates/starknet_committer/src/block_committer/state_diff_generator.rs

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ use crate::block_committer::random_structs::RandomValue;
 #[path = "state_diff_generator_test.rs"]
 pub mod state_diff_generator_test;
 
-pub(crate) const RANDOM_STATE_DIFF_CONTRACT_ADDRESS: u32 = 500_u32;
+pub const RANDOM_STATE_DIFF_CONTRACT_ADDRESS: u32 = 500_u32;
 pub(crate) const N_STORAGE_UPDATES: usize = 1000_usize;
 
 pub fn generate_random_state_diff<R: Rng>(rng: &mut R) -> StateDiff {

crates/starknet_committer/src/forest/filled_forest.rs

Lines changed: 11 additions & 5 deletions
@@ -6,7 +6,7 @@ use starknet_patricia::patricia_merkle_tree::filled_tree::tree::FilledTree;
 use starknet_patricia::patricia_merkle_tree::node_data::leaf::LeafModifications;
 use starknet_patricia::patricia_merkle_tree::types::NodeIndex;
 use starknet_patricia::patricia_merkle_tree::updated_skeleton_tree::tree::UpdatedSkeletonTreeImpl;
-use starknet_patricia_storage::storage_trait::Storage;
+use starknet_patricia_storage::storage_trait::{DbHashMap, Storage};
 use tracing::info;
 
 use crate::block_committer::input::{
@@ -32,18 +32,24 @@ pub struct FilledForest {
 }
 
 impl FilledForest {
-    pub fn write_to_storage(&self, storage: &mut impl Storage) {
+    /// Writes the node serialization of the filled trees to storage. Returns the number of new
+    /// objects written to storage.
+    pub fn write_to_storage(&self, storage: &mut impl Storage) -> usize {
         // Serialize all trees to one hash map.
-        let new_db_objects = self
+        let new_db_objects: DbHashMap = self
             .storage_tries
             .values()
             .flat_map(|tree| tree.serialize().into_iter())
             .chain(self.contracts_trie.serialize())
             .chain(self.classes_trie.serialize())
             .collect();
 
-        // Store the new hash map
-        storage.mset(new_db_objects).expect("Write to storage failed");
+        // Store the new hash map.
+        let n_new_facts = new_db_objects.len();
+        storage
+            .mset(new_db_objects)
+            .unwrap_or_else(|_| panic!("Write of {n_new_facts} new facts to storage failed"));
+        n_new_facts
     }
 
     pub fn get_contract_root_hash(&self) -> HashOutput {
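The signature change turns write_to_storage into a measurement point: callers can now count how many serialized nodes ("facts") each commit adds. A minimal caller sketch, assuming only the API shown in this diff; the helper name, the running-total bookkeeping, and the module paths (inferred from the file layout) are illustrative, not part of the commit:

use starknet_committer::forest::filled_forest::FilledForest;
use starknet_patricia_storage::storage_trait::Storage;

/// Illustrative helper (not in this commit): persist a committed forest and
/// keep a running total of facts written across blocks.
fn write_and_count(forest: &FilledForest, storage: &mut impl Storage, total_facts: &mut usize) {
    // write_to_storage now returns the number of new objects it wrote.
    let n_new_facts = forest.write_to_storage(storage);
    *total_facts += n_new_facts;
}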
crates/starknet_committer_cli/Cargo.toml

Lines changed: 23 additions & 0 deletions

[package]
name = "starknet_committer_cli"
version.workspace = true
edition.workspace = true
repository.workspace = true
license-file.workspace = true
description = "CLI for the committer storage benchmark."

[lints]
workspace = true

[dependencies]
clap = { workspace = true, features = ["cargo", "derive"] }
csv.workspace = true
rand.workspace = true
starknet_committer = { workspace = true, features = ["testing"] }
# TODO(Tzahi): Remove once tracing is moved to a common location.
starknet_committer_and_os_cli.workspace = true
starknet_patricia.workspace = true
starknet_patricia_storage.workspace = true
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tracing.workspace = true
tracing-subscriber.workspace = true
crates/starknet_committer_cli/src/commands.rs

Lines changed: 178 additions & 0 deletions

#![allow(clippy::as_conversions)]

use std::fs::{self, File};
use std::time::Instant;

use csv::Writer;
use rand::rngs::StdRng;
use rand::SeedableRng;
use starknet_committer::block_committer::commit::commit_block;
use starknet_committer::block_committer::input::{ConfigImpl, Input};
use starknet_committer::block_committer::state_diff_generator::generate_random_state_diff;
use starknet_patricia::hash::hash_trait::HashOutput;
use starknet_patricia_storage::map_storage::MapStorage;
use tracing::info;

pub type InputImpl = Input<ConfigImpl>;

struct TimeMeasurement {
    timer: Option<Instant>,
    total_time: u128,             // Total duration of all blocks (milliseconds).
    per_fact_durations: Vec<u64>, // Average duration (microseconds) per new fact in a block.
    n_facts: Vec<usize>,
    block_durations: Vec<u64>,    // Duration of a block (milliseconds).
    facts_in_db: Vec<usize>,
    total_facts: usize,
}

impl TimeMeasurement {
    fn new(n_iterations: usize) -> Self {
        Self {
            timer: None,
            total_time: 0,
            per_fact_durations: Vec::with_capacity(n_iterations),
            n_facts: Vec::with_capacity(n_iterations),
            block_durations: Vec::with_capacity(n_iterations),
            facts_in_db: Vec::with_capacity(n_iterations),
            total_facts: 0,
        }
    }

    fn start_measurement(&mut self) {
        self.timer = Some(Instant::now());
    }

    fn stop_measurement(&mut self, facts_count: usize) {
        let duration =
            self.timer.expect("stop_measurement called before start_measurement").elapsed();
        info!(
            "Time elapsed for iteration {}: {} milliseconds",
            self.n_results(),
            duration.as_millis()
        );
        self.total_time += duration.as_millis();
        self.per_fact_durations
            .push(duration.div_f32(facts_count as f32).as_micros().try_into().unwrap());
        self.block_durations.push(duration.as_millis().try_into().unwrap());
        self.n_facts.push(facts_count);
        self.facts_in_db.push(self.total_facts);
        self.total_facts += facts_count;
    }

    fn n_results(&self) -> usize {
        self.block_durations.len()
    }

    /// Returns the average time per block (milliseconds).
    fn block_average_time(&self) -> f64 {
        self.total_time as f64 / self.n_results() as f64
    }

    /// Returns the average time per fact over a window of `window_size` blocks (microseconds).
    fn average_window_time(&self, window_size: usize) -> Vec<f64> {
        let mut averages = Vec::new(); // In microseconds per fact.
        // Takes only the full windows, so if the last window is smaller than `window_size`, it is
        // ignored.
        let n_windows = self.n_results() / window_size;
        for i in 0..n_windows {
            let window_start = i * window_size;
            let sum: u64 =
                self.block_durations[window_start..window_start + window_size].iter().sum();
            let sum_of_facts: usize =
                self.n_facts[window_start..window_start + window_size].iter().sum();
            averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
        }
        averages
    }

    fn pretty_print(&self, window_size: usize) {
        if self.n_results() == 0 {
            println!("No measurements were taken.");
            return;
        }

        println!(
            "Total time: {} milliseconds for {} iterations",
            self.total_time,
            self.n_results()
        );
        println!("Average block time: {:.2} milliseconds", self.block_average_time());

        println!("Average time per window of {window_size} iterations:");
        let means = self.average_window_time(window_size);
        let max = means.iter().cloned().fold(f64::MIN, f64::max);
        // Print a graph visualization of block times.
        for (i, fact_duration) in means.iter().enumerate() {
            let norm = fact_duration / max;
            let width = (norm * 40.0).round() as usize; // Up to 40 characters wide.
            let bar = "█".repeat(width.max(1));
            println!("win {i:>4}: {fact_duration:>8.4} microsecond / fact | {bar}");
        }
    }

    fn to_csv(&self, path: &str, output_dir: &str) {
        let _ = fs::create_dir_all(output_dir);
        let file =
            File::create(format!("{output_dir}/{path}")).expect("Failed to create CSV file.");
        let mut wtr = Writer::from_writer(file);
        wtr.write_record([
            "block_number",
            "n_facts",
            "facts_in_db",
            "time_per_fact_micros",
            "block_duration_millis",
        ])
        .expect("Failed to write CSV header.");
        for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
            .per_fact_durations
            .iter()
            .zip(self.n_facts.iter())
            .zip(self.block_durations.iter())
            .zip(self.facts_in_db.iter())
            .enumerate()
        {
            wtr.write_record(&[
                i.to_string(),
                n_facts.to_string(),
                facts_in_db.to_string(),
                per_fact.to_string(),
                duration.to_string(),
            ])
            .expect("Failed to write CSV record.");
        }
        wtr.flush().expect("Failed to flush CSV writer.");
    }
}

pub async fn run_storage_benchmark(n_iterations: usize, output_dir: &str) {
    let seed = 42_u64; // Constant seed for reproducibility.

    let mut rng = StdRng::seed_from_u64(seed);
    let mut time_measurement = TimeMeasurement::new(n_iterations);

    let mut storage = MapStorage::default();
    let mut contracts_trie_root_hash = HashOutput::default();
    let mut classes_trie_root_hash = HashOutput::default();

    for i in 0..n_iterations {
        info!("Committer storage benchmark iteration {}/{}", i + 1, n_iterations);
        let input = InputImpl {
            state_diff: generate_random_state_diff(&mut rng),
            contracts_trie_root_hash,
            classes_trie_root_hash,
            config: ConfigImpl::default(),
        };

        time_measurement.start_measurement();
        let filled_forest =
            commit_block(input, &mut storage).await.expect("Failed to commit the given block.");
        let n_new_facts = filled_forest.write_to_storage(&mut storage);

        time_measurement.stop_measurement(n_new_facts);

        contracts_trie_root_hash = filled_forest.get_contract_root_hash();
        classes_trie_root_hash = filled_forest.get_compiled_class_root_hash();
    }

    time_measurement.pretty_print(50);
    time_measurement.to_csv(&format!("{n_iterations}.csv"), output_dir);
}
crates/starknet_committer_cli/src/lib.rs

Lines changed: 1 addition & 0 deletions

pub mod commands;
crates/starknet_committer_cli/src/main.rs

Lines changed: 59 additions & 0 deletions

use clap::{Args, Parser, Subcommand};
use starknet_committer_and_os_cli::tracing_utils::configure_tracing;
use starknet_committer_cli::commands::run_storage_benchmark;
use tracing::level_filters::LevelFilter;
use tracing::{info, Level};
use tracing_subscriber::reload::Handle;
use tracing_subscriber::Registry;

#[derive(Parser, Debug)]
pub struct CommitterCliCommand {
    #[clap(subcommand)]
    command: Command,
}

#[derive(Debug, Args)]
struct StorageArgs {
    /// Number of iterations to run the benchmark.
    #[clap(default_value = "1000")]
    n_iterations: usize,
    #[clap(long, default_value = "warn")]
    log_level: String,
    #[clap(long, default_value = "/tmp/committer_storage_benchmark")]
    output_dir: String,
}

#[derive(Debug, Subcommand)]
enum Command {
    StorageBenchmark(StorageArgs),
}

pub async fn run_committer_cli(
    committer_command: CommitterCliCommand,
    log_filter_handle: Handle<LevelFilter, Registry>,
) {
    info!("Starting committer-cli with command: \n{:?}", committer_command);
    match committer_command.command {
        Command::StorageBenchmark(args) => {
            let level = match args.log_level.to_lowercase().as_str() {
                "error" => Level::ERROR,
                "warn" => Level::WARN,
                "info" => Level::INFO,
                "debug" => Level::DEBUG,
                "trace" => Level::TRACE,
                _ => Level::INFO,
            };
            log_filter_handle
                .modify(|filter| *filter = level.into())
                .expect("Failed to set the log level.");
            run_storage_benchmark(args.n_iterations, &args.output_dir).await;
        }
    }
}

#[tokio::main]
async fn main() {
    let log_filter_handle = configure_tracing();
    let committer_command = CommitterCliCommand::parse();
    run_committer_cli(committer_command, log_filter_handle).await;
}
