
Commit 5217c47

Revert "Merge pull request #2161 from Kobzol/check-test-cases-with-measurements"
This reverts commit f521c0a, reversing changes made to b34ad29.
1 parent 71dfcd1 commit 5217c47

9 files changed (+64, -315 lines)


Cargo.lock

Lines changed: 1 addition & 2 deletions
Some generated files are not rendered by default.

collector/Cargo.toml

Lines changed: 0 additions & 1 deletion
@@ -11,7 +11,6 @@ anyhow = { workspace = true }
 chrono = { workspace = true, features = ["serde"] }
 clap = { workspace = true, features = ["derive"] }
 env_logger = { workspace = true }
-hashbrown = { workspace = true }
 log = { workspace = true }
 reqwest = { workspace = true, features = ["blocking", "json"] }
 serde = { workspace = true, features = ["derive"] }

collector/src/bin/collector.rs

Lines changed: 6 additions & 6 deletions
@@ -225,8 +225,6 @@ fn profile_compile(
             toolchain,
             Some(1),
             targets,
-            // We always want to profile everything
-            &hashbrown::HashSet::new(),
         ));
         eprintln!("Finished benchmark {benchmark_id}");

@@ -1806,8 +1804,11 @@ async fn bench_compile(
     print_intro: &dyn Fn(),
     measure: F,
 ) {
-    collector.start_compile_step(conn, benchmark_name).await;
-
+    let is_fresh = collector.start_compile_step(conn, benchmark_name).await;
+    if !is_fresh {
+        eprintln!("skipping {} -- already benchmarked", benchmark_name);
+        return;
+    }
     let mut tx = conn.transaction().await;
     let (supports_stable, category) = category.db_representation();
     tx.conn()
@@ -1818,7 +1819,7 @@ async fn bench_compile(
         tx.conn(),
         benchmark_name,
         &shared.artifact_id,
-        collector,
+        collector.artifact_row_id,
         config.is_self_profile,
     );
     let result = measure(&mut processor).await;
@@ -1865,7 +1866,6 @@ async fn bench_compile(
                 &shared.toolchain,
                 config.iterations,
                 &config.targets,
-                &collector.measured_compile_test_cases,
             ))
             .await
             .with_context(|| anyhow::anyhow!("Cannot compile {}", benchmark.name))
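A note for readers of the hunks above: with this revert, bench_compile goes back to skipping a whole benchmark only when `start_compile_step` reports it as not fresh, instead of filtering individual test cases against prior measurements. The standalone sketch below illustrates that contract with an in-memory set standing in for the database-backed step bookkeeping; `StepTracker`, its method, and the benchmark names are invented for illustration and are not rustc-perf types.

use std::collections::HashSet;

/// Illustrative stand-in for the collector's step bookkeeping: starting a step
/// returns `true` only the first time a benchmark is seen for this artifact,
/// mirroring the `start_compile_step(..) -> bool` contract restored above.
/// (The real implementation records steps in the database.)
struct StepTracker {
    started: HashSet<String>,
}

impl StepTracker {
    fn start_compile_step(&mut self, benchmark_name: &str) -> bool {
        // `HashSet::insert` returns false when the value was already present,
        // i.e. the benchmark was started by an earlier run against the same artifact.
        self.started.insert(benchmark_name.to_string())
    }
}

fn main() {
    let mut tracker = StepTracker {
        started: HashSet::new(),
    };
    for benchmark_name in ["helloworld", "regex-1.5.5", "helloworld"] {
        let is_fresh = tracker.start_compile_step(benchmark_name);
        if !is_fresh {
            eprintln!("skipping {} -- already benchmarked", benchmark_name);
            continue;
        }
        println!("benchmarking {}", benchmark_name);
    }
}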

collector/src/compile/benchmark/mod.rs

Lines changed: 17 additions & 127 deletions
@@ -8,7 +8,6 @@ use crate::compile::execute::{CargoProcess, Processor};
 use crate::toolchain::Toolchain;
 use crate::utils::wait_for_future;
 use anyhow::{bail, Context};
-use database::selector::CompileTestCase;
 use log::debug;
 use std::collections::{HashMap, HashSet};
 use std::fmt::{Display, Formatter};
@@ -244,7 +243,6 @@ impl Benchmark {
         toolchain: &Toolchain,
         iterations: Option<usize>,
         targets: &[Target],
-        already_computed: &hashbrown::HashSet<CompileTestCase>,
     ) -> anyhow::Result<()> {
         if self.config.disabled {
             eprintln!("Skipping {}: disabled", self.name);
@@ -275,65 +273,19 @@ impl Benchmark {
             return Ok(());
         }

-        struct BenchmarkDir {
-            dir: TempDir,
-            scenarios: Vec<Scenario>,
-            profile: Profile,
-            backend: CodegenBackend,
-            target: Target,
-        }
-
-        // Materialize the test cases that we want to benchmark
-        // We need to handle scenarios a bit specially, because they share the target directory
-        let mut benchmark_dirs: Vec<BenchmarkDir> = vec![];
-
+        eprintln!("Preparing {}", self.name);
+        let mut target_dirs: Vec<((CodegenBackend, Profile, Target), TempDir)> = vec![];
         for backend in backends {
             for profile in &profiles {
                 for target in targets {
-                    // Do we have any scenarios left to compute?
-                    let remaining_scenarios = scenarios
-                        .iter()
-                        .filter(|scenario| {
-                            self.should_run_scenario(
-                                scenario,
-                                profile,
-                                backend,
-                                target,
-                                already_computed,
-                            )
-                        })
-                        .copied()
-                        .collect::<Vec<Scenario>>();
-                    if remaining_scenarios.is_empty() {
-                        continue;
-                    }
-
-                    let temp_dir = self.make_temp_dir(&self.path)?;
-                    benchmark_dirs.push(BenchmarkDir {
-                        dir: temp_dir,
-                        scenarios: remaining_scenarios,
-                        profile: *profile,
-                        backend: *backend,
-                        target: *target,
-                    });
+                    target_dirs.push((
+                        (*backend, *profile, *target),
+                        self.make_temp_dir(&self.path)?,
+                    ));
                 }
             }
         }

-        if benchmark_dirs.is_empty() {
-            eprintln!(
-                "Skipping {}: all test cases were previously computed",
-                self.name
-            );
-            return Ok(());
-        }
-
-        eprintln!(
-            "Preparing {} (test cases: {})",
-            self.name,
-            benchmark_dirs.len()
-        );
-
         // In parallel (but with a limit to the number of CPUs), prepare all
         // profiles. This is done in parallel vs. sequentially because:
         // * We don't record any measurements during this phase, so the
@@ -367,18 +319,18 @@ impl Benchmark {
                 .get(),
         )
         .context("jobserver::new")?;
-        let mut threads = Vec::with_capacity(benchmark_dirs.len());
-        for benchmark_dir in &benchmark_dirs {
+        let mut threads = Vec::with_capacity(target_dirs.len());
+        for ((backend, profile, target), prep_dir) in &target_dirs {
             let server = server.clone();
             let thread = s.spawn::<_, anyhow::Result<()>>(move || {
                 wait_for_future(async move {
                     let server = server.clone();
                     self.mk_cargo_process(
                         toolchain,
-                        benchmark_dir.dir.path(),
-                        benchmark_dir.profile,
-                        benchmark_dir.backend,
-                        benchmark_dir.target,
+                        prep_dir.path(),
+                        *profile,
+                        *backend,
+                        *target,
                     )
                     .jobserver(server)
                     .run_rustc(false)
@@ -413,11 +365,10 @@ impl Benchmark {
         let mut timing_dirs: Vec<ManuallyDrop<TempDir>> = vec![];

         let benchmark_start = std::time::Instant::now();
-        for benchmark_dir in &benchmark_dirs {
-            let backend = benchmark_dir.backend;
-            let profile = benchmark_dir.profile;
-            let target = benchmark_dir.target;
-            let scenarios = &benchmark_dir.scenarios;
+        for ((backend, profile, target), prep_dir) in &target_dirs {
+            let backend = *backend;
+            let profile = *profile;
+            let target = *target;
             eprintln!(
                 "Running {}: {:?} + {:?} + {:?} + {:?}",
                 self.name, profile, scenarios, backend, target,
@@ -437,7 +388,7 @@ impl Benchmark {
                 }
                 log::debug!("Benchmark iteration {}/{}", i + 1, iterations);
                 // Don't delete the directory on error.
-                let timing_dir = ManuallyDrop::new(self.make_temp_dir(benchmark_dir.dir.path())?);
+                let timing_dir = ManuallyDrop::new(self.make_temp_dir(prep_dir.path())?);
                 let cwd = timing_dir.path();

                 // A full non-incremental build.
@@ -507,67 +458,6 @@ impl Benchmark {

         Ok(())
     }
-
-    /// Return true if the given `scenario` should be computed.
-    fn should_run_scenario(
-        &self,
-        scenario: &Scenario,
-        profile: &Profile,
-        backend: &CodegenBackend,
-        target: &Target,
-        already_computed: &hashbrown::HashSet<CompileTestCase>,
-    ) -> bool {
-        let benchmark = database::Benchmark::from(self.name.0.as_str());
-        let profile = match profile {
-            Profile::Check => database::Profile::Check,
-            Profile::Debug => database::Profile::Debug,
-            Profile::Doc => database::Profile::Doc,
-            Profile::DocJson => database::Profile::DocJson,
-            Profile::Opt => database::Profile::Opt,
-            Profile::Clippy => database::Profile::Clippy,
-        };
-        let backend = match backend {
-            CodegenBackend::Llvm => database::CodegenBackend::Llvm,
-            CodegenBackend::Cranelift => database::CodegenBackend::Cranelift,
-        };
-        let target = match target {
-            Target::X86_64UnknownLinuxGnu => database::Target::X86_64UnknownLinuxGnu,
-        };
-
-        match scenario {
-            // For these scenarios, we can simply check if they were benchmarked or not
-            Scenario::Full | Scenario::IncrFull | Scenario::IncrUnchanged => {
-                let test_case = CompileTestCase {
-                    benchmark,
-                    profile,
-                    backend,
-                    target,
-                    scenario: match scenario {
-                        Scenario::Full => database::Scenario::Empty,
-                        Scenario::IncrFull => database::Scenario::IncrementalEmpty,
-                        Scenario::IncrUnchanged => database::Scenario::IncrementalFresh,
-                        Scenario::IncrPatched => unreachable!(),
-                    },
-                };
-                !already_computed.contains(&test_case)
-            }
-            // For incr-patched, it is a bit more complicated.
-            // If there is at least a single uncomputed `IncrPatched`, we need to rerun
-            // all of them, because they stack on top of one another.
-            // Note that we don't need to explicitly include `IncrFull` if `IncrPatched`
-            // is selected, as the benchmark code will always run `IncrFull` before `IncrPatched`.
-            Scenario::IncrPatched => self.patches.iter().any(|patch| {
-                let test_case = CompileTestCase {
-                    benchmark,
-                    profile,
-                    scenario: database::Scenario::IncrementalPatch(patch.name),
-                    backend,
-                    target,
-                };
-                !already_computed.contains(&test_case)
-            }),
-        }
-    }
 }

 /// Directory containing compile-time benchmarks.
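To summarize the removed `should_run_scenario` helper above: the reverted change skipped any test case (benchmark, profile, scenario, backend, target) that already had measurements for the artifact, treating `IncrPatched` scenarios as a group because patched incremental builds stack on one another. The self-contained sketch below shows only the simple membership-check idea, using invented stand-in types rather than the real `database::selector::CompileTestCase`.

use std::collections::HashSet;

// Simplified stand-ins for the real collector/database types: a "test case" is
// just a (benchmark, profile, scenario) tuple here, not the real
// database::selector::CompileTestCase (which also tracks backend and target).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Scenario {
    Full,
    IncrFull,
    IncrUnchanged,
}

type TestCase = (&'static str, &'static str, Scenario);

/// Mirrors the simple (non-IncrPatched) arm of the removed helper: a scenario
/// still needs to run only if it has no recorded measurement yet.
fn should_run(test_case: &TestCase, already_computed: &HashSet<TestCase>) -> bool {
    !already_computed.contains(test_case)
}

fn main() {
    // Pretend one test case was measured by an earlier, interrupted run.
    let already_computed: HashSet<TestCase> =
        [("regex-1.5.5", "check", Scenario::Full)].into_iter().collect();

    let remaining: Vec<Scenario> = [Scenario::Full, Scenario::IncrFull, Scenario::IncrUnchanged]
        .into_iter()
        .filter(|scenario| should_run(&("regex-1.5.5", "check", *scenario), &already_computed))
        .collect();

    // With the reverted change, only IncrFull and IncrUnchanged would run here;
    // after the revert, every scenario runs again regardless of prior measurements.
    println!("remaining scenarios: {:?}", remaining);
}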

collector/src/compile/execute/bencher.rs

Lines changed: 7 additions & 14 deletions
@@ -10,7 +10,6 @@ use crate::compile::execute::{
 };
 use crate::toolchain::Toolchain;
 use crate::utils::git::get_rustc_perf_commit;
-use crate::CollectorCtx;
 use anyhow::Context;
 use database::CollectionId;
 use futures::stream::FuturesUnordered;
@@ -43,7 +42,7 @@ pub struct BenchProcessor<'a> {
     benchmark: &'a BenchmarkName,
     conn: &'a mut dyn database::Connection,
     artifact: &'a database::ArtifactId,
-    collector_ctx: &'a CollectorCtx,
+    artifact_row_id: database::ArtifactIdNumber,
     is_first_collection: bool,
     is_self_profile: bool,
     tries: u8,
@@ -55,7 +54,7 @@ impl<'a> BenchProcessor<'a> {
         conn: &'a mut dyn database::Connection,
         benchmark: &'a BenchmarkName,
         artifact: &'a database::ArtifactId,
-        collector_ctx: &'a CollectorCtx,
+        artifact_row_id: database::ArtifactIdNumber,
         is_self_profile: bool,
     ) -> Self {
         // Check we have `perf` or (`xperf.exe` and `tracelog.exe`) available.
@@ -79,7 +78,7 @@ impl<'a> BenchProcessor<'a> {
             conn,
             benchmark,
             artifact,
-            collector_ctx,
+            artifact_row_id,
             is_first_collection: true,
             is_self_profile,
             tries: 0,
@@ -109,7 +108,7 @@ impl<'a> BenchProcessor<'a> {
         for (stat, value) in stats.iter() {
             buf.push(self.conn.record_statistic(
                 collection,
-                self.collector_ctx.artifact_row_id,
+                self.artifact_row_id,
                 self.benchmark.0.as_str(),
                 profile,
                 scenario,
@@ -124,13 +123,7 @@ impl<'a> BenchProcessor<'a> {
     }

     pub async fn measure_rustc(&mut self, toolchain: &Toolchain) -> anyhow::Result<()> {
-        rustc::measure(
-            self.conn,
-            toolchain,
-            self.artifact,
-            self.collector_ctx.artifact_row_id,
-        )
-        .await
+        rustc::measure(self.conn, toolchain, self.artifact, self.artifact_row_id).await
     }
 }

@@ -259,7 +252,7 @@ impl Processor for BenchProcessor<'_> {
             .map(|profile| {
                 self.conn.record_raw_self_profile(
                     profile.collection,
-                    self.collector_ctx.artifact_row_id,
+                    self.artifact_row_id,
                     self.benchmark.0.as_str(),
                     profile.profile,
                     profile.scenario,
@@ -277,7 +270,7 @@ impl Processor for BenchProcessor<'_> {

             // FIXME: Record codegen backend in the self profile name
             let prefix = PathBuf::from("self-profile")
-                .join(self.collector_ctx.artifact_row_id.0.to_string())
+                .join(self.artifact_row_id.0.to_string())
                 .join(self.benchmark.0.as_str())
                 .join(profile.profile.to_string())
                 .join(profile.scenario.to_id());

collector/src/lib.rs

Lines changed: 7 additions & 16 deletions
@@ -17,9 +17,7 @@ pub mod utils;

 use crate::compile::benchmark::{Benchmark, BenchmarkName};
 use crate::runtime::{BenchmarkGroup, BenchmarkSuite};
-use database::selector::CompileTestCase;
 use database::{ArtifactId, ArtifactIdNumber, Connection};
-use hashbrown::HashSet;
 use process::Stdio;
 use std::time::{Duration, Instant};

@@ -332,30 +330,23 @@ impl CollectorStepBuilder {
             tx.commit().await.unwrap();
             artifact_row_id
         };
-        // Find out which tests cases were already computed
-        let measured_compile_test_cases = conn
-            .get_compile_test_cases_with_measurements(&artifact_row_id)
-            .await
-            .expect("cannot fetch measured compile test cases from DB");
-
-        CollectorCtx {
-            artifact_row_id,
-            measured_compile_test_cases,
-        }
+        CollectorCtx { artifact_row_id }
     }
 }

 /// Represents an in-progress run for a given artifact.
 pub struct CollectorCtx {
     pub artifact_row_id: ArtifactIdNumber,
-    /// Which tests cases were already computed **before** this collection began?
-    pub measured_compile_test_cases: HashSet<CompileTestCase>,
 }

 impl CollectorCtx {
-    pub async fn start_compile_step(&self, conn: &dyn Connection, benchmark_name: &BenchmarkName) {
+    pub async fn start_compile_step(
+        &self,
+        conn: &dyn Connection,
+        benchmark_name: &BenchmarkName,
+    ) -> bool {
         conn.collector_start_step(self.artifact_row_id, &benchmark_name.0)
-            .await;
+            .await
     }

     pub async fn end_compile_step(&self, conn: &dyn Connection, benchmark_name: &BenchmarkName) {
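Pulling together the `+` lines of the hunk above, the post-revert shape of `CollectorCtx` is roughly as follows. This is a reading aid assembled from the diff (with an added doc comment), not a standalone compilable file; `Connection`, `ArtifactIdNumber`, and `BenchmarkName` come from the surrounding crates.

/// Represents an in-progress run for a given artifact.
pub struct CollectorCtx {
    pub artifact_row_id: ArtifactIdNumber,
}

impl CollectorCtx {
    /// Returns whether the step was freshly started; callers such as
    /// `bench_compile` use a `false` return to skip already-benchmarked crates.
    pub async fn start_compile_step(
        &self,
        conn: &dyn Connection,
        benchmark_name: &BenchmarkName,
    ) -> bool {
        conn.collector_start_step(self.artifact_row_id, &benchmark_name.0)
            .await
    }
}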
