
Commit fd1fbdd

committer: add cli for committer benchmark

yoavGrs authored and TzahiTaub committed
1 parent 4312805 · commit fd1fbdd

File tree: 12 files changed, +338 −8 lines

Cargo.lock

Lines changed: 37 additions & 0 deletions
Some generated files are not rendered by default.

Cargo.toml

Lines changed: 3 additions & 0 deletions
@@ -81,6 +81,7 @@ members = [
     "crates/starknet_api",
     "crates/starknet_committer",
     "crates/starknet_committer_and_os_cli",
+    "crates/starknet_committer_cli",
     "crates/starknet_os",
     "crates/starknet_os_flow_tests",
     "crates/starknet_patricia",
@@ -201,6 +202,7 @@ clap = "4.5.4"
 colored = "3"
 const_format = "0.2.30"
 criterion = "0.5.1"
+csv = "1.3.1"
 dashmap = "6.1.0"
 deadqueue = "0.2.4"
 defaultmap = "0.5.0"
@@ -311,6 +313,7 @@ starknet-types-core = "0.1.8"
 starknet_api = { path = "crates/starknet_api", version = "0.0.0" }
 starknet_committer.path = "crates/starknet_committer"
 starknet_committer_and_os_cli.path = "crates/starknet_committer_and_os_cli"
+starknet_committer_cli.path = "crates/starknet_committer_cli"
 starknet_os.path = "crates/starknet_os"
 starknet_os_flow_tests.path = "crates/starknet_os_flow_tests"
 starknet_patricia.path = "crates/starknet_patricia"

commitlint.config.js

Lines changed: 1 addition & 0 deletions
@@ -80,6 +80,7 @@ const AllowedScopes = ['apollo_batcher',
     'signature',
     'starknet_api',
     'starknet_committer',
+    'starknet_committer_cli',
     'starknet_committer_and_os_cli',
     'starknet_os',
     'starknet_os_flow_tests',

crates/starknet_committer/src/block_committer/input.rs

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ impl Default for ConfigImpl {
     }
 }
 
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Debug, Default, Eq, PartialEq)]
 pub struct Input<C: Config> {
     /// All relevant information for the state diff commitment.
     pub state_diff: StateDiff,
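Deriving Default lets callers build an empty input concisely. A minimal sketch of what this enables (assuming StateDiff and the root-hash fields also implement Default, which the derive requires to compile):

    // Hypothetical snippet: an all-empty input against empty tries.
    let input: Input<ConfigImpl> = Input::default();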

crates/starknet_committer/src/block_committer/state_diff_generator.rs

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ use crate::block_committer::random_structs::RandomValue;
 #[path = "state_diff_generator_test.rs"]
 pub mod state_diff_generator_test;
 
-pub(crate) const RANDOM_STATE_DIFF_CONTRACT_ADDRESS: u32 = 500_u32;
+pub const RANDOM_STATE_DIFF_CONTRACT_ADDRESS: u32 = 500_u32;
 pub(crate) const N_STORAGE_UPDATES: usize = 1000_usize;
 
 pub fn generate_random_state_diff<R: Rng>(rng: &mut R) -> StateDiff {

crates/starknet_committer/src/forest/filled_forest.rs

Lines changed: 11 additions & 5 deletions
@@ -6,7 +6,7 @@ use starknet_patricia::patricia_merkle_tree::filled_tree::tree::FilledTree;
 use starknet_patricia::patricia_merkle_tree::node_data::leaf::LeafModifications;
 use starknet_patricia::patricia_merkle_tree::types::NodeIndex;
 use starknet_patricia::patricia_merkle_tree::updated_skeleton_tree::tree::UpdatedSkeletonTreeImpl;
-use starknet_patricia_storage::storage_trait::Storage;
+use starknet_patricia_storage::storage_trait::{DbHashMap, Storage};
 use tracing::info;
 
 use crate::block_committer::input::{
@@ -32,18 +32,24 @@ pub struct FilledForest {
 }
 
 impl FilledForest {
-    pub fn write_to_storage(&self, storage: &mut impl Storage) {
+    /// Writes the node serialization of the filled trees to storage. Returns the number of new
+    /// objects written to storage.
+    pub fn write_to_storage(&self, storage: &mut impl Storage) -> usize {
         // Serialize all trees to one hash map.
-        let new_db_objects = self
+        let new_db_objects: DbHashMap = self
             .storage_tries
             .values()
             .flat_map(|tree| tree.serialize().into_iter())
             .chain(self.contracts_trie.serialize())
             .chain(self.classes_trie.serialize())
             .collect();
 
-        // Store the new hash map
-        storage.mset(new_db_objects).expect("Write to storage failed");
+        // Store the new hash map.
+        let n_new_facts = new_db_objects.len();
+        storage
+            .mset(new_db_objects)
+            .unwrap_or_else(|_| panic!("Write of {n_new_facts} new facts to storage failed"));
+        n_new_facts
     }
 
     pub fn get_contract_root_hash(&self) -> HashOutput {
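The returned count is what feeds the benchmark's per-fact timing below. A minimal call-site sketch (hypothetical usage, assuming `forest` is a FilledForest and `storage` any Storage impl):

    // Write all serialized nodes and learn how many new facts were persisted.
    let n_new_facts: usize = forest.write_to_storage(&mut storage);
    tracing::info!("Persisted {n_new_facts} new facts");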

crates/starknet_committer_and_os_cli/src/tracing_utils.rs

Lines changed: 16 additions & 0 deletions
@@ -1,4 +1,5 @@
 use tracing::level_filters::LevelFilter;
+use tracing::Level;
 use tracing_subscriber::prelude::*;
 use tracing_subscriber::reload::Handle;
 use tracing_subscriber::{filter, fmt, reload, Registry};
@@ -15,3 +16,18 @@ pub fn configure_tracing() -> Handle<LevelFilter, Registry> {
     tracing_subscriber::registry().with(global_filter).with(layer).init();
     global_filter_handle
 }
+
+/// Sets the given log filter handle to the given log level (falls back to INFO on unknown input).
+pub fn modify_log_level(log_level: String, log_filter_handle: Handle<LevelFilter, Registry>) {
+    let level = match log_level.to_lowercase().as_str() {
+        "error" => Level::ERROR,
+        "warn" => Level::WARN,
+        "info" => Level::INFO,
+        "debug" => Level::DEBUG,
+        "trace" => Level::TRACE,
+        _ => Level::INFO,
+    };
+    log_filter_handle
+        .modify(|filter| *filter = level.into())
+        .expect("Failed to set the log level.");
+}
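Together the two functions give runtime-adjustable logging: configure_tracing installs the subscriber and returns a reload handle, which modify_log_level later rewrites. A sketch of the intended flow (the function names are this crate's; the wiring is an assumed usage, not code from the commit):

    // Install the subscriber once, keep the reload handle, then adjust verbosity later.
    let handle = configure_tracing();
    modify_log_level("debug".to_string(), handle);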
crates/starknet_committer_cli/Cargo.toml

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+[package]
+name = "starknet_committer_cli"
+version.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+description = "CLI for the committer storage benchmark."
+
+[lints]
+workspace = true
+
+[dependencies]
+clap = { workspace = true, features = ["cargo", "derive"] }
+csv.workspace = true
+rand.workspace = true
+starknet_committer = { workspace = true, features = ["testing"] }
+# TODO(Tzahi): Remove once tracing is moved to a common location.
+starknet_committer_and_os_cli.workspace = true
+starknet_patricia.workspace = true
+starknet_patricia_storage.workspace = true
+tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
+tracing.workspace = true
+tracing-subscriber.workspace = true
Lines changed: 190 additions & 0 deletions
@@ -0,0 +1,190 @@
+use std::fs::{self, File};
+use std::time::Instant;
+
+use csv::Writer;
+use rand::rngs::SmallRng;
+use rand::SeedableRng;
+use starknet_committer::block_committer::commit::commit_block;
+use starknet_committer::block_committer::input::{ConfigImpl, Input};
+use starknet_committer::block_committer::state_diff_generator::generate_random_state_diff;
+use starknet_patricia::hash::hash_trait::HashOutput;
+use starknet_patricia_storage::map_storage::MapStorage;
+use tracing::info;
+
+pub type InputImpl = Input<ConfigImpl>;
+
+struct TimeMeasurement {
+    timer: Option<Instant>,
+    total_time: u128,             // Total duration of all blocks (milliseconds).
+    per_fact_durations: Vec<u64>, // Average duration (microseconds) per new fact in a block.
+    n_facts: Vec<usize>,
+    block_durations: Vec<u64>,    // Duration of a block (milliseconds).
+    facts_in_db: Vec<usize>,      // Number of facts in the DB prior to the current block.
+    total_facts: usize,
+}
+
+impl TimeMeasurement {
+    fn new(n_iterations: usize) -> Self {
+        Self {
+            timer: None,
+            total_time: 0,
+            per_fact_durations: Vec::with_capacity(n_iterations),
+            n_facts: Vec::with_capacity(n_iterations),
+            block_durations: Vec::with_capacity(n_iterations),
+            facts_in_db: Vec::with_capacity(n_iterations),
+            total_facts: 0,
+        }
+    }
+
+    fn start_measurement(&mut self) {
+        self.timer = Some(Instant::now());
+    }
+
+    fn stop_measurement(&mut self, facts_count: usize) {
+        let duration =
+            self.timer.expect("stop_measurement called before start_measurement").elapsed();
+        info!(
+            "Time elapsed for iteration {}: {} milliseconds",
+            self.n_results(),
+            duration.as_millis()
+        );
+        let millis = duration.as_millis();
+        self.total_time += millis;
+        #[allow(clippy::as_conversions)]
+        self.per_fact_durations
+            .push(duration.div_f32(facts_count as f32).as_micros().try_into().unwrap());
+        self.block_durations.push(millis.try_into().unwrap());
+        self.n_facts.push(facts_count);
+        self.facts_in_db.push(self.total_facts);
+        self.total_facts += facts_count;
+    }
+
+    fn n_results(&self) -> usize {
+        self.block_durations.len()
+    }
+
+    /// Returns the average time per block (milliseconds).
+    fn block_average_time(&self) -> f64 {
+        #[allow(clippy::as_conversions)]
+        {
+            self.total_time as f64 / self.n_results() as f64
+        }
+    }
+
+    /// Returns the average time per fact over a window of `window_size` blocks (microseconds).
+    fn average_window_time(&self, window_size: usize) -> Vec<f64> {
+        let mut averages = Vec::new(); // In microseconds per fact.
+        // Takes only the full windows, so if the last window is smaller than `window_size`, it is
+        // ignored.
+        let n_windows = self.n_results() / window_size;
+        for i in 0..n_windows {
+            let window_start = i * window_size;
+            let sum: u64 =
+                self.block_durations[window_start..window_start + window_size].iter().sum();
+            let sum_of_facts: usize =
+                self.n_facts[window_start..window_start + window_size].iter().sum();
+            #[allow(clippy::as_conversions)]
+            averages.push(1000.0 * sum as f64 / sum_of_facts as f64);
+        }
+        averages
+    }
+
+    fn pretty_print(&self, window_size: usize) {
+        if self.n_results() == 0 {
+            println!("No measurements were taken.");
+            return;
+        }
+
+        println!(
+            "Total time: {} milliseconds for {} iterations.",
+            self.total_time,
+            self.n_results()
+        );
+        println!("Average block time: {:.2} milliseconds.\n", self.block_average_time());
+
+        println!("Average time per window of {window_size} iterations:");
+        let means = self.average_window_time(window_size);
+        let max = means.iter().cloned().fold(f64::MIN, f64::max);
+        // Print a graph visualization of block times.
+        for (i, fact_duration) in means.iter().enumerate() {
+            let norm = fact_duration / max;
+            #[allow(clippy::as_conversions)]
+            let width = (norm * 40.0).round() as usize; // Up to 40 characters wide.
+            let bar = "█".repeat(width.max(1));
+            println!("win {i:>4}: {fact_duration:>8.4} microseconds / fact | {bar}");
+        }
+    }
+
+    fn to_csv(&self, path: &str, output_dir: &str) {
+        fs::create_dir_all(output_dir).expect("Failed to create output directory.");
+        let file =
+            File::create(format!("{output_dir}/{path}")).expect("Failed to create CSV file.");
+        let mut wtr = Writer::from_writer(file);
+        wtr.write_record([
+            "block_number",
+            "n_facts",
+            "facts_in_db",
+            "time_per_fact_micros",
+            "block_duration_millis",
+        ])
+        .expect("Failed to write CSV header.");
+        for (i, (((&per_fact, &n_facts), &duration), &facts_in_db)) in self
+            .per_fact_durations
+            .iter()
+            .zip(self.n_facts.iter())
+            .zip(self.block_durations.iter())
+            .zip(self.facts_in_db.iter())
+            .enumerate()
+        {
+            wtr.write_record(&[
+                i.to_string(),
+                n_facts.to_string(),
+                facts_in_db.to_string(),
+                per_fact.to_string(),
+                duration.to_string(),
+            ])
+            .expect("Failed to write CSV record.");
+        }
+        wtr.flush().expect("Failed to flush CSV writer.");
+    }
+}
+
+/// Runs the committer on `n_iterations` randomly generated blocks.
+/// Prints the time measurements to the console and saves statistics to a CSV file in the given
+/// output directory.
+pub async fn run_storage_benchmark(seed: u64, n_iterations: usize, output_dir: &str) {
+    let mut rng = SmallRng::seed_from_u64(seed);
+    let mut time_measurement = TimeMeasurement::new(n_iterations);
+
+    let mut storage = MapStorage::default();
+    let mut contracts_trie_root_hash = HashOutput::default();
+    let mut classes_trie_root_hash = HashOutput::default();
+
+    for i in 0..n_iterations {
+        info!("Committer storage benchmark iteration {}/{}", i + 1, n_iterations);
+        let input = InputImpl {
+            state_diff: generate_random_state_diff(&mut rng),
+            contracts_trie_root_hash,
+            classes_trie_root_hash,
+            config: ConfigImpl::default(),
+        };
+
+        time_measurement.start_measurement();
+        let filled_forest =
+            commit_block(input, &mut storage).await.expect("Failed to commit the given block.");
+        // TODO(Tzahi): Measure the computation above and the fact writing below separately.
+        let n_new_facts = filled_forest.write_to_storage(&mut storage);
+
+        time_measurement.stop_measurement(n_new_facts);
+
+        contracts_trie_root_hash = filled_forest.get_contract_root_hash();
+        classes_trie_root_hash = filled_forest.get_compiled_class_root_hash();
+    }
+
+    time_measurement.pretty_print(50);
+    time_measurement.to_csv(&format!("{n_iterations}.csv"), output_dir);
+}
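The CLI binary that invokes this benchmark is not among the files rendered in this excerpt. A minimal sketch of how a clap-based main could wire arguments into run_storage_benchmark; the module path, flag names, and defaults here are assumptions for illustration, not the commit's actual interface:

    use clap::Parser;
    // Assumed module path, inferred from the `pub mod commands` line below.
    use starknet_committer_cli::commands::run_storage_benchmark;

    /// Hypothetical argument struct; flag names are illustrative only.
    #[derive(Parser)]
    struct Args {
        /// Seed for the random state-diff generator.
        #[arg(long, default_value_t = 0)]
        seed: u64,
        /// Number of random blocks to commit.
        #[arg(long, default_value_t = 100)]
        n_iterations: usize,
        /// Directory that receives the CSV report.
        #[arg(long, default_value = "output")]
        output_dir: String,
    }

    #[tokio::main]
    async fn main() {
        let args = Args::parse();
        run_storage_benchmark(args.seed, args.n_iterations, &args.output_dir).await;
    }

The tokio "macros" and "rt-multi-thread" features in the crate's Cargo.toml above are consistent with this kind of #[tokio::main] entry point.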
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+pub mod commands;
