
Commit cb68e94

[CI][Bench] Combine all benchmark-related jobs in one place (#20439)
Plus cleanups. Each commit makes its own change; the most important part is the last commit.
1 parent: 12f324d · commit: cb68e94

9 files changed: +133, -148 lines changed
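
At a glance, the combined workflow keeps one trigger per use case and three independent job chains. A condensed sketch of the resulting .github/workflows/sycl-ur-perf-benchmarking.yml, with job bodies elided (all names and triggers are taken from the diff below):

name: SYCL Run Benchmarks
on:
  schedule:            # nightly run; results are uploaded
    - cron: '0 0 * * *'
  pull_request:        # PR run, only when benchmark-related files change
    paths:
      - 'devops/scripts/benchmarks/**'
      - 'devops/actions/run-tests/benchmark/**'
      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
  workflow_dispatch:   # manual run; inputs: preset, pr_no, commit_hash, save_name, upload_results, exit_on_failure, runner, backend
jobs:
  # [Dispatch] chain
  sanitize_inputs_dispatch:
  build_sycl_dispatch:
  benchmark_dispatch:
  # [Nightly] chain
  build_nightly:
  benchmark_nightly:
  # [PR] framework-testing chain
  build_pr:
  test_benchmark_framework: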

.github/workflows/sycl-detect-changes.yml

Lines changed: 2 additions & 0 deletions
@@ -67,6 +67,8 @@ jobs:
       - devops/dependencies-igc-dev.json
     benchmarks:
       - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
     perf-tests:
       - sycl/test-e2e/PerformanceTests/**
     esimd:
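
For orientation: the benchmarks entry above is a change filter emitted by sycl-detect-changes.yml, and downstream jobs gate on it via contains(). A minimal sketch of such a gate, modeled on the test_benchmark_scripts job removed from sycl-linux-precommit.yml in the next file (the job name here is hypothetical):

  # Hypothetical consumer job; only the gating condition is the point of this sketch.
  run_if_benchmarks_changed:
    needs: [build, detect_changes]
    if: |
      always() && !cancelled()
      && needs.build.outputs.build_conclusion == 'success'
      && contains(needs.detect_changes.outputs.filters, 'benchmarks')
    uses: ./.github/workflows/sycl-linux-run-tests.yml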

.github/workflows/sycl-linux-precommit.yml

Lines changed: 4 additions & 23 deletions
@@ -9,6 +9,7 @@ on:
       - sycl
       - sycl-rel-**
     # Do not run builds if changes are only in the following locations
+    # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (to run there instead)
     paths-ignore:
       - '.github/ISSUE_TEMPLATE/**'
       - '.github/CODEOWNERS'
@@ -32,6 +33,9 @@ on:
       - 'unified-runtime/test/**'
       - 'unified-runtime/third_party/**'
       - 'unified-runtime/tools/**'
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
 
 concurrency:
   # Cancel a currently running workflow from the same PR, branch or tag.
@@ -225,29 +229,6 @@ jobs:
       skip_run: ${{matrix.use_igc_dev && contains(github.event.pull_request.labels.*.name, 'ci-no-devigc') || 'false'}}
       env: ${{ matrix.env || (contains(needs.detect_changes.outputs.filters, 'esimd') && '{}' || '{"LIT_FILTER_OUT":"ESIMD/"}') }}
 
-  test_benchmark_scripts:
-    needs: [build, detect_changes]
-    if: |
-      always() && !cancelled()
-      && needs.build.outputs.build_conclusion == 'success'
-      && contains(needs.detect_changes.outputs.filters, 'benchmarks')
-    uses: ./.github/workflows/sycl-linux-run-tests.yml
-    with:
-      name: Benchmark suite precommit testing
-      runner: '["PVC_PERF"]'
-      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
-      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
-      target_devices: 'level_zero:gpu'
-      tests_selector: benchmarks
-      benchmark_upload_results: false
-      benchmark_preset: 'Minimal'
-      benchmark_dry_run: true
-      benchmark_exit_on_failure: true
-      repo_ref: ${{ github.sha }}
-      toolchain_artifact: ${{ needs.build.outputs.toolchain_artifact }}
-      toolchain_artifact_filename: ${{ needs.build.outputs.toolchain_artifact_filename }}
-      toolchain_decompress_command: ${{ needs.build.outputs.toolchain_decompress_command }}
-
   test-perf:
     needs: [build, detect_changes]
     if: |

.github/workflows/sycl-linux-run-tests.yml

Lines changed: 1 addition & 0 deletions
@@ -134,6 +134,7 @@ on:
         type: string
         default: 'Minimal'
         required: False
+      # dry-run is passed only to compare.py (to not fail on regression), not to main.py (where such flag would omit all benchmark runs)
       benchmark_dry_run:
         description: |
           Whether or not to fail the workflow upon a regression.
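
As a rough illustration of that comment (the action's steps are not part of this hunk, and the --dry-run flag name is an assumption), the value is only threaded into the comparison step, along these lines:

  # Sketch only: dry_run affects the regression comparison, never the benchmark run itself.
  - name: Compare results against baseline
    shell: bash
    env:
      DRY_RUN: ${{ inputs.benchmark_dry_run }}
    run: |
      ARGS=""
      if [ "$DRY_RUN" = "true" ]; then
        ARGS="--dry-run"   # assumed flag: report regressions without failing the job
      fi
      python3 devops/scripts/benchmarks/compare.py $ARGS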

.github/workflows/sycl-nightly-benchmarking.yml

Lines changed: 0 additions & 52 deletions
This file was deleted.

.github/workflows/sycl-ur-perf-benchmarking.yml

Lines changed: 119 additions & 71 deletions
@@ -1,59 +1,19 @@
-name: Run Benchmarks
+# A combined workflow for all benchmarks-related jobs for SYCL and UR.
+# Supports both manual triggering (dispatch) and nightly runs.
+# It also tests changes to benchmark scripts/framework in PR, if modified.
+name: SYCL Run Benchmarks
 
 on:
-  workflow_call:
-    inputs:
-      preset:
-        type: string
-        description: |
-          Benchmark presets to run: See /devops/scripts/benchmarks/presets.py
-        required: false
-        default: 'Minimal' # Only compute-benchmarks
-      pr_no:
-        type: string
-        description: |
-          PR no. to build SYCL from if specified: SYCL will be built from HEAD
-          of incoming branch used by the specified PR no.
-
-          If both pr_no and commit_hash are empty, the latest commit in
-          deployment branch will be used.
-        required: false
-        default: ''
-      commit_hash:
-        type: string
-        description: |
-          Commit hash (within intel/llvm) to build SYCL from if specified.
-
-          If both pr_no and commit_hash are empty, the latest commit in
-          deployment branch will be used.
-        required: false
-        default: ''
-      save_name:
-        type: string
-        description: |
-          Specify a custom name to use for the benchmark result: If uploading
-          results, this will be the name used to refer results from the current
-          run.
-        required: false
-        default: ''
-      upload_results:
-        type: string # true/false: workflow_dispatch does not support booleans
-        description: |
-          Upload results to https://intel.github.io/llvm/benchmarks/.
-        required: true
-      exit_on_failure:
-        type: string # true/false: workflow_dispatch does not support booleans
-        description: |
-          Fail benchmark script on any error. Limit number of iterations to just test correctness.
-        required: false
-        default: 'false'
-      runner:
-        type: string
-        required: true
-      backend:
-        type: string
-        required: true
-
+  schedule:
+    # 3 hours ahead of SYCL nightly
+    - cron: '0 0 * * *'
+  # Run on pull requests only when a benchmark-related files were changed.
+  pull_request:
+    # These paths are exactly the same as in sycl-linux/windows-precommit.yml (to ignore over there)
+    paths:
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
   workflow_dispatch:
     inputs:
       preset:
@@ -66,6 +26,8 @@ on:
           - Minimal
           - Normal
           - Test
+          - Gromacs
+          - OneDNN
         default: 'Minimal' # Only compute-benchmarks
       pr_no:
         type: string
@@ -115,13 +77,14 @@ on:
         options:
           - 'level_zero:gpu'
          - 'level_zero_v2:gpu'
-          # As of #17407, sycl-linux-build now builds v2 by default
 
 permissions: read-all
 
 jobs:
-  sanitize_inputs:
-    name: Sanitize inputs
+  # Manual trigger (dispatch) path:
+  sanitize_inputs_dispatch:
+    name: '[Dispatch] Sanitize inputs'
+    if: github.event_name == 'workflow_dispatch'
     runs-on: ubuntu-latest
     env:
       COMMIT_HASH: ${{ inputs.commit_hash }}
@@ -169,25 +132,25 @@ jobs:
           echo "Final sanitized values:"
           cat $GITHUB_OUTPUT
 
-  build_sycl:
-    name: Build SYCL
-    needs: [ sanitize_inputs ]
+  build_sycl_dispatch:
+    name: '[Dispatch] Build SYCL'
+    needs: [ sanitize_inputs_dispatch ]
     uses: ./.github/workflows/sycl-linux-build.yml
     with:
-      build_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
+      build_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }}
       build_cache_root: "/__w/"
       build_cache_suffix: "prod_noassert"
      build_configure_extra_args: "--no-assertions"
       build_image: "ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest"
       cc: clang
       cxx: clang++
       changes: '[]'
-
       toolchain_artifact: sycl_linux_prod_noassert
 
-  run_benchmarks_build:
-    name: Run Benchmarks on Build
-    needs: [ build_sycl, sanitize_inputs ]
+  benchmark_dispatch:
+    name: '[Dispatch] Benchmarks'
+    needs: [ build_sycl_dispatch, sanitize_inputs_dispatch ]
+    if: always() && !cancelled() && needs.build_sycl_dispatch.outputs.build_conclusion == 'success'
    strategy:
      matrix:
        include:
@@ -197,17 +160,102 @@
    uses: ./.github/workflows/sycl-linux-run-tests.yml
    secrets: inherit
    with:
-      name: Run compute-benchmarks (${{ matrix.save_name }}, ${{ matrix.runner }}, ${{ matrix.backend }})
+      name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})"
      runner: ${{ matrix.runner }}
      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
      target_devices: ${{ matrix.backend }}
      tests_selector: benchmarks
      benchmark_upload_results: ${{ inputs.upload_results }}
-      benchmark_save_name: ${{ needs.sanitize_inputs.outputs.benchmark_save_name }}
+      benchmark_save_name: ${{ needs.sanitize_inputs_dispatch.outputs.benchmark_save_name }}
      benchmark_preset: ${{ inputs.preset }}
      benchmark_exit_on_failure: ${{ inputs.exit_on_failure }}
-      repo_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
-      toolchain_artifact: ${{ needs.build_sycl.outputs.toolchain_artifact }}
-      toolchain_artifact_filename: ${{ needs.build_sycl.outputs.toolchain_artifact_filename }}
-      toolchain_decompress_command: ${{ needs.build_sycl.outputs.toolchain_decompress_command }}
+      repo_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }}
+      toolchain_artifact: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build_sycl_dispatch.outputs.toolchain_decompress_command }}
+  # END manual trigger (dispatch) path
+
+  # Nightly benchmarking path:
+  build_nightly:
+    name: '[Nightly] Build SYCL'
+    if: github.repository == 'intel/llvm' && github.event_name == 'schedule'
+    uses: ./.github/workflows/sycl-linux-build.yml
+    secrets: inherit
+    with:
+      build_cache_root: "/__w/"
+      build_configure_extra_args: '--no-assertions'
+      build_image: ghcr.io/intel/llvm/ubuntu2404_build:latest
+
+      toolchain_artifact: sycl_linux_default
+      toolchain_artifact_filename: sycl_linux.tar.gz
+
+  benchmark_nightly:
+    name: '[Nightly] Benchmarks'
+    needs: [build_nightly]
+    if: always() && !cancelled() && needs.build_nightly.outputs.build_conclusion == 'success'
+    strategy:
+      fail-fast: false
+      matrix:
+        runner: ['["PVC_PERF"]', '["BMG_PERF"]']
+        backend: ['level_zero:gpu', 'level_zero_v2:gpu']
+        include:
+          - ref: ${{ github.sha }}
+            save_name: 'Baseline'
+            preset: 'Full'
+    uses: ./.github/workflows/sycl-linux-run-tests.yml
+    secrets: inherit
+    with:
+      name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})"
+      runner: ${{ matrix.runner }}
+      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
+      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
+      target_devices: ${{ matrix.backend }}
+      tests_selector: benchmarks
+      benchmark_upload_results: true
+      benchmark_save_name: ${{ matrix.save_name }}
+      benchmark_preset: ${{ matrix.preset }}
+      repo_ref: ${{ matrix.ref }}
+      toolchain_artifact: ${{ needs.build_nightly.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build_nightly.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build_nightly.outputs.toolchain_decompress_command }}
+  # END nightly benchmarking path
+
+  # Benchmark framework builds and runs on PRs path:
+  build_pr:
+    name: '[PR] Build SYCL'
+    if: github.event_name == 'pull_request'
+    uses: ./.github/workflows/sycl-linux-build.yml
+    with:
+      build_ref: ${{ github.sha }}
+      build_cache_root: "/__w/"
+      build_cache_suffix: "default"
+      # Docker image has last nightly pre-installed and added to the PATH
+      build_image: "ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest"
+      cc: clang
+      cxx: clang++
+      changes: ${{ needs.detect_changes.outputs.filters }}
+      toolchain_artifact: sycl_linux_default
+
+  # TODO: When we have stable BMG runner(s), consider moving this job to that runner.
+  test_benchmark_framework:
+    name: '[PR] Benchmark suite testing'
+    needs: [build_pr]
+    if: always() && !cancelled() && needs.build_pr.outputs.build_conclusion == 'success'
+    uses: ./.github/workflows/sycl-linux-run-tests.yml
+    with:
+      name: 'Framework test: PVC_PERF, L0, Minimal preset'
+      runner: '["PVC_PERF"]'
+      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
+      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
+      target_devices: 'level_zero:gpu'
+      tests_selector: benchmarks
+      benchmark_upload_results: false
+      benchmark_preset: 'Minimal'
+      benchmark_dry_run: true
+      benchmark_exit_on_failure: true
+      repo_ref: ${{ github.sha }}
+      toolchain_artifact: ${{ needs.build.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build.outputs.toolchain_decompress_command }}
+  # END benchmark framework builds and runs on PRs path
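
For reference, tests_selector: benchmarks is what routes sycl-linux-run-tests.yml into the benchmark action modified later in this commit. A plausible sketch of that dispatch (the step itself is not shown in this diff; the action's input names other than dry_run are assumptions):

  # Assumed wiring inside sycl-linux-run-tests.yml, shown for orientation only.
  - name: Run benchmark suite
    if: inputs.tests_selector == 'benchmarks'
    uses: ./devops/actions/run-tests/benchmark
    with:
      target_devices: ${{ inputs.target_devices }}
      upload_results: ${{ inputs.benchmark_upload_results }}
      save_name: ${{ inputs.benchmark_save_name }}
      preset: ${{ inputs.benchmark_preset }}
      dry_run: ${{ inputs.benchmark_dry_run }}
      exit_on_failure: ${{ inputs.benchmark_exit_on_failure }}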

.github/workflows/sycl-windows-precommit.yml

Lines changed: 4 additions & 0 deletions
@@ -7,6 +7,7 @@ on:
       - llvmspirv_pulldown
       - sycl-rel-**
     # Do not run builds if changes are only in the following locations
+    # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (to run there instead)
     paths-ignore:
       - '.github/ISSUE_TEMPLATE/**'
       - '.github/CODEOWNERS'
@@ -32,6 +33,9 @@ on:
       - 'unified-runtime/test/**'
       - 'unified-runtime/third_party/**'
       - 'unified-runtime/tools/**'
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
 
 permissions: read-all
 
devops/actions/run-tests/benchmark/action.yml

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@ inputs:
     type: string
     required: False
     default: ""
+  # dry-run is passed only to compare.py (to not fail on regression), not to main.py (where such flag would omit all benchmark runs)
   dry_run:
     type: string
     required: False

devops/scripts/benchmarks/CONTRIB.md

Lines changed: 1 addition & 1 deletion
@@ -170,7 +170,7 @@ The benchmark suite generates an interactive HTML dashboard that visualizes `Res
    * If adding to an existing category, modify the corresponding `Suite` class (e.g., `benches/compute.py`) to instantiate and return your new benchmark in its `benchmarks()` method.
    * If creating a new category, create a new `Suite` class inheriting from `benches.base.Suite`. Implement `name()` and `benchmarks()`. Add necessary `setup()` if the suite requires shared setup. Add group metadata via `additional_metadata()` if needed.
 3. **Register Suite:** Import and add your new `Suite` instance to the `suites` list in `main.py`.
-4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` to include the new suite in presets' description.
+4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` and benchmarking workflow to include the new suite in presets' description/choices.
 
 ## Recommendations
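
The "choices" half of that updated instruction maps onto the workflow_dispatch preset input edited earlier in this commit. For a hypothetical new suite/preset named MySuite, the workflow-side addition would look like this (option list abridged to the choices visible in the diff above):

        preset:
          type: choice
          options:
            - Minimal
            - Normal
            - Test
            - Gromacs
            - OneDNN
            - MySuite   # hypothetical new suite, mirroring its entry in presets.py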

0 commit comments
