From 987111f3fc15eb09a0bb2f9cdb52b082cacadf6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Fri, 17 Oct 2025 16:54:27 +0200 Subject: [PATCH 1/4] [CI][Bench] Cleanups in workflows and bench-related scripts --- .github/workflows/sycl-linux-run-tests.yml | 1 + .github/workflows/sycl-nightly-benchmarking.yml | 11 ++++++----- .github/workflows/sycl-ur-perf-benchmarking.yml | 3 +++ devops/actions/run-tests/benchmark/action.yml | 1 + devops/scripts/benchmarks/CONTRIB.md | 2 +- 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml index f247256da4ab..cc8733aaa830 100644 --- a/.github/workflows/sycl-linux-run-tests.yml +++ b/.github/workflows/sycl-linux-run-tests.yml @@ -134,6 +134,7 @@ on: type: string default: 'Minimal' required: False + # dry-run is passed only to compare.py (to not fail on regression), not to main.py (where such flag would omit all benchmark runs) benchmark_dry_run: description: | Whether or not to fail the workflow upon a regression. diff --git a/.github/workflows/sycl-nightly-benchmarking.yml b/.github/workflows/sycl-nightly-benchmarking.yml index 9d1098ee68b3..1cad7533a2b6 100644 --- a/.github/workflows/sycl-nightly-benchmarking.yml +++ b/.github/workflows/sycl-nightly-benchmarking.yml @@ -9,7 +9,8 @@ on: permissions: read-all jobs: - ubuntu2204_build: + ubuntu_build: + name: Build toolkit if: github.repository == 'intel/llvm' uses: ./.github/workflows/sycl-linux-build.yml secrets: inherit @@ -21,9 +22,9 @@ jobs: toolchain_artifact: sycl_linux_default toolchain_artifact_filename: sycl_linux.tar.gz - run-sycl-benchmarks: - needs: [ubuntu2204_build] - if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + Benchmarks: + needs: [ubuntu_build] + if: ${{ always() && !cancelled() && needs.ubuntu_build.outputs.build_conclusion == 'success' }} strategy: fail-fast: false matrix: @@ -36,7 +37,7 @@ jobs: uses: ./.github/workflows/sycl-linux-run-tests.yml secrets: inherit with: - name: Run compute-benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}) + name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})" runner: ${{ matrix.runner }} image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index 0fd16fd77dd4..53f49c35d206 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -1,6 +1,7 @@ name: Run Benchmarks on: + # XXX: is this workflow called anywhere? 
workflow_call: inputs: preset: @@ -66,6 +67,8 @@ on: - Minimal - Normal - Test + - Gromacs + - OneDNN default: 'Minimal' # Only compute-benchmarks pr_no: type: string diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml index 13dc4cf7cbf6..073501dcbb22 100644 --- a/devops/actions/run-tests/benchmark/action.yml +++ b/devops/actions/run-tests/benchmark/action.yml @@ -29,6 +29,7 @@ inputs: type: string required: False default: "" + # dry-run is passed only to compare.py (to not fail on regression), not to main.py (where such flag would omit all benchmark runs) dry_run: type: string required: False diff --git a/devops/scripts/benchmarks/CONTRIB.md b/devops/scripts/benchmarks/CONTRIB.md index 7d1d1150e135..fb1964dad7fe 100644 --- a/devops/scripts/benchmarks/CONTRIB.md +++ b/devops/scripts/benchmarks/CONTRIB.md @@ -170,7 +170,7 @@ The benchmark suite generates an interactive HTML dashboard that visualizes `Res * If adding to an existing category, modify the corresponding `Suite` class (e.g., `benches/compute.py`) to instantiate and return your new benchmark in its `benchmarks()` method. * If creating a new category, create a new `Suite` class inheriting from `benches.base.Suite`. Implement `name()` and `benchmarks()`. Add necessary `setup()` if the suite requires shared setup. Add group metadata via `additional_metadata()` if needed. 3. **Register Suite:** Import and add your new `Suite` instance to the `suites` list in `main.py`. -4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` to include the new suite in presets' description. +4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` and benchmarking workflow to include the new suite in presets' description/choices. ## Recommendations From 17853eefd12e6fb339998ab55d3bba6c37034e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Wed, 22 Oct 2025 16:45:05 +0200 Subject: [PATCH 2/4] [CI][Bench] Remove unused workflow_call trigger in benchmark's workflow --- .../workflows/sycl-ur-perf-benchmarking.yml | 54 ------------------- 1 file changed, 54 deletions(-) diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index 53f49c35d206..4f2084dfb1e2 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -1,60 +1,6 @@ name: Run Benchmarks on: - # XXX: is this workflow called anywhere? - workflow_call: - inputs: - preset: - type: string - description: | - Benchmark presets to run: See /devops/scripts/benchmarks/presets.py - required: false - default: 'Minimal' # Only compute-benchmarks - pr_no: - type: string - description: | - PR no. to build SYCL from if specified: SYCL will be built from HEAD - of incoming branch used by the specified PR no. - - If both pr_no and commit_hash are empty, the latest commit in - deployment branch will be used. - required: false - default: '' - commit_hash: - type: string - description: | - Commit hash (within intel/llvm) to build SYCL from if specified. - - If both pr_no and commit_hash are empty, the latest commit in - deployment branch will be used. 
- required: false - default: '' - save_name: - type: string - description: | - Specify a custom name to use for the benchmark result: If uploading - results, this will be the name used to refer results from the current - run. - required: false - default: '' - upload_results: - type: string # true/false: workflow_dispatch does not support booleans - description: | - Upload results to https://intel.github.io/llvm/benchmarks/. - required: true - exit_on_failure: - type: string # true/false: workflow_dispatch does not support booleans - description: | - Fail benchmark script on any error. Limit number of iterations to just test correctness. - required: false - default: 'false' - runner: - type: string - required: true - backend: - type: string - required: true - workflow_dispatch: inputs: preset: From 64c9feeeea980e21ace3f416b5f15bf273d45301 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Wed, 22 Oct 2025 16:53:23 +0200 Subject: [PATCH 3/4] [CI][Bench] Combine all benchmark jobs in one workflow 1. Right now changing only benchmark framework triggers all SYCL build. Thanks to this change only relevant changes are triggered for testing framework. 2. Nightly build was seperated. I believe keeping everything in one place makes it easier to maintain changes. No changes in logic/builds were made in this commit - only minor cleanups like names, plus changed the triggers. --- .github/workflows/sycl-detect-changes.yml | 2 + .github/workflows/sycl-linux-precommit.yml | 27 +--- .../workflows/sycl-nightly-benchmarking.yml | 53 ------- .../workflows/sycl-ur-perf-benchmarking.yml | 133 +++++++++++++++--- .github/workflows/sycl-windows-precommit.yml | 4 + 5 files changed, 126 insertions(+), 93 deletions(-) delete mode 100644 .github/workflows/sycl-nightly-benchmarking.yml diff --git a/.github/workflows/sycl-detect-changes.yml b/.github/workflows/sycl-detect-changes.yml index 281e60db073b..964a14b4035d 100644 --- a/.github/workflows/sycl-detect-changes.yml +++ b/.github/workflows/sycl-detect-changes.yml @@ -67,6 +67,8 @@ jobs: - devops/dependencies-igc-dev.json benchmarks: - 'devops/scripts/benchmarks/**' + - 'devops/actions/run-tests/benchmark/**' + - '.github/workflows/sycl-ur-perf-benchmarking.yml' perf-tests: - sycl/test-e2e/PerformanceTests/** esimd: diff --git a/.github/workflows/sycl-linux-precommit.yml b/.github/workflows/sycl-linux-precommit.yml index bba5f526e580..eae8323f9544 100644 --- a/.github/workflows/sycl-linux-precommit.yml +++ b/.github/workflows/sycl-linux-precommit.yml @@ -9,6 +9,7 @@ on: - sycl - sycl-rel-** # Do not run builds if changes are only in the following locations + # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (to run there instead) paths-ignore: - '.github/ISSUE_TEMPLATE/**' - '.github/CODEOWNERS' @@ -32,6 +33,9 @@ on: - 'unified-runtime/test/**' - 'unified-runtime/third_party/**' - 'unified-runtime/tools/**' + - 'devops/scripts/benchmarks/**' + - 'devops/actions/run-tests/benchmark/**' + - '.github/workflows/sycl-ur-perf-benchmarking.yml' concurrency: # Cancel a currently running workflow from the same PR, branch or tag. 
@@ -225,29 +229,6 @@ jobs: skip_run: ${{matrix.use_igc_dev && contains(github.event.pull_request.labels.*.name, 'ci-no-devigc') || 'false'}} env: ${{ matrix.env || (contains(needs.detect_changes.outputs.filters, 'esimd') && '{}' || '{"LIT_FILTER_OUT":"ESIMD/"}') }} - test_benchmark_scripts: - needs: [build, detect_changes] - if: | - always() && !cancelled() - && needs.build.outputs.build_conclusion == 'success' - && contains(needs.detect_changes.outputs.filters, 'benchmarks') - uses: ./.github/workflows/sycl-linux-run-tests.yml - with: - name: Benchmark suite precommit testing - runner: '["PVC_PERF"]' - image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest - image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN - target_devices: 'level_zero:gpu' - tests_selector: benchmarks - benchmark_upload_results: false - benchmark_preset: 'Minimal' - benchmark_dry_run: true - benchmark_exit_on_failure: true - repo_ref: ${{ github.sha }} - toolchain_artifact: ${{ needs.build.outputs.toolchain_artifact }} - toolchain_artifact_filename: ${{ needs.build.outputs.toolchain_artifact_filename }} - toolchain_decompress_command: ${{ needs.build.outputs.toolchain_decompress_command }} - test-perf: needs: [build, detect_changes] if: | diff --git a/.github/workflows/sycl-nightly-benchmarking.yml b/.github/workflows/sycl-nightly-benchmarking.yml deleted file mode 100644 index 1cad7533a2b6..000000000000 --- a/.github/workflows/sycl-nightly-benchmarking.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: SYCL Nightly Benchmarking - -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * *' - # 3 hours ahead of SYCL nightly - -permissions: read-all - -jobs: - ubuntu_build: - name: Build toolkit - if: github.repository == 'intel/llvm' - uses: ./.github/workflows/sycl-linux-build.yml - secrets: inherit - with: - build_cache_root: "/__w/" - build_configure_extra_args: '--no-assertions' - build_image: ghcr.io/intel/llvm/ubuntu2404_build:latest - - toolchain_artifact: sycl_linux_default - toolchain_artifact_filename: sycl_linux.tar.gz - - Benchmarks: - needs: [ubuntu_build] - if: ${{ always() && !cancelled() && needs.ubuntu_build.outputs.build_conclusion == 'success' }} - strategy: - fail-fast: false - matrix: - runner: ['["PVC_PERF"]', '["BMG_PERF"]'] - backend: ['level_zero:gpu', 'level_zero_v2:gpu'] - include: - - ref: ${{ github.sha }} - save_name: 'Baseline' - preset: 'Full' - uses: ./.github/workflows/sycl-linux-run-tests.yml - secrets: inherit - with: - name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})" - runner: ${{ matrix.runner }} - image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest - image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN - target_devices: ${{ matrix.backend }} - tests_selector: benchmarks - benchmark_upload_results: true - benchmark_save_name: ${{ matrix.save_name }} - benchmark_preset: ${{ matrix.preset }} - repo_ref: ${{ matrix.ref }} - toolchain_artifact: ${{ needs.ubuntu2204_build.outputs.toolchain_artifact }} - toolchain_artifact_filename: ${{ needs.ubuntu2204_build.outputs.toolchain_artifact_filename }} - toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.toolchain_decompress_command }} - diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index 4f2084dfb1e2..e171164e11aa 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ 
b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -1,6 +1,19 @@ +# A combined workflow for all benchmarks-related jobs for SYCL and UR. +# Supports both manual triggering (dispatch) and nightly runs. +# It also tests changes to benchmark scripts/framework in PR, if modified. name: Run Benchmarks on: + schedule: + # 3 hours ahead of SYCL nightly + - cron: '0 0 * * *' + # Run on pull requests only when a benchmark-related files were changed. + pull_request: + # These paths are exactly the same as in sycl-linux/windows-precommit.yml (to ignore over there) + paths: + - 'devops/scripts/benchmarks/**' + - 'devops/actions/run-tests/benchmark/**' + - '.github/workflows/sycl-ur-perf-benchmarking.yml' workflow_dispatch: inputs: preset: @@ -64,13 +77,14 @@ on: options: - 'level_zero:gpu' - 'level_zero_v2:gpu' - # As of #17407, sycl-linux-build now builds v2 by default permissions: read-all jobs: - sanitize_inputs: - name: Sanitize inputs + # Manual trigger (dispatch) path: + sanitize_inputs_dispatch: + name: '[Dispatch] Sanitize inputs' + if: github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest env: COMMIT_HASH: ${{ inputs.commit_hash }} @@ -118,12 +132,12 @@ jobs: echo "Final sanitized values:" cat $GITHUB_OUTPUT - build_sycl: - name: Build SYCL - needs: [ sanitize_inputs ] + build_sycl_dispatch: + name: '[Dispatch] Build SYCL' + needs: [ sanitize_inputs_dispatch ] uses: ./.github/workflows/sycl-linux-build.yml with: - build_ref: ${{ needs.sanitize_inputs.outputs.build_ref }} + build_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }} build_cache_root: "/__w/" build_cache_suffix: "prod_noassert" build_configure_extra_args: "--no-assertions" @@ -131,12 +145,12 @@ jobs: cc: clang cxx: clang++ changes: '[]' - toolchain_artifact: sycl_linux_prod_noassert - run_benchmarks_build: - name: Run Benchmarks on Build - needs: [ build_sycl, sanitize_inputs ] + benchmark_dispatch: + name: '[Dispatch] Benchmarks' + needs: [ build_sycl_dispatch, sanitize_inputs_dispatch ] + if: always() && !cancelled() && needs.build_sycl_dispatch.outputs.build_conclusion == 'success' strategy: matrix: include: @@ -146,17 +160,102 @@ jobs: uses: ./.github/workflows/sycl-linux-run-tests.yml secrets: inherit with: - name: Run compute-benchmarks (${{ matrix.save_name }}, ${{ matrix.runner }}, ${{ matrix.backend }}) + name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})" runner: ${{ matrix.runner }} image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: ${{ matrix.backend }} tests_selector: benchmarks benchmark_upload_results: ${{ inputs.upload_results }} - benchmark_save_name: ${{ needs.sanitize_inputs.outputs.benchmark_save_name }} + benchmark_save_name: ${{ needs.sanitize_inputs_dispatch.outputs.benchmark_save_name }} benchmark_preset: ${{ inputs.preset }} benchmark_exit_on_failure: ${{ inputs.exit_on_failure }} - repo_ref: ${{ needs.sanitize_inputs.outputs.build_ref }} - toolchain_artifact: ${{ needs.build_sycl.outputs.toolchain_artifact }} - toolchain_artifact_filename: ${{ needs.build_sycl.outputs.toolchain_artifact_filename }} - toolchain_decompress_command: ${{ needs.build_sycl.outputs.toolchain_decompress_command }} + repo_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }} + toolchain_artifact: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact }} + toolchain_artifact_filename: ${{ 
needs.build_sycl_dispatch.outputs.toolchain_artifact_filename }} + toolchain_decompress_command: ${{ needs.build_sycl_dispatch.outputs.toolchain_decompress_command }} + # END manual trigger (dispatch) path + + # Nightly benchmarking path: + build_nightly: + name: '[Nightly] Build SYCL' + if: github.repository == 'intel/llvm' && github.event_name == 'schedule' + uses: ./.github/workflows/sycl-linux-build.yml + secrets: inherit + with: + build_cache_root: "/__w/" + build_configure_extra_args: '--no-assertions' + build_image: ghcr.io/intel/llvm/ubuntu2404_build:latest + + toolchain_artifact: sycl_linux_default + toolchain_artifact_filename: sycl_linux.tar.gz + + benchmark_nightly: + name: '[Nightly] Benchmarks' + needs: [build_nightly] + if: always() && !cancelled() && needs.build_nightly.outputs.build_conclusion == 'success' + strategy: + fail-fast: false + matrix: + runner: ['["PVC_PERF"]', '["BMG_PERF"]'] + backend: ['level_zero:gpu', 'level_zero_v2:gpu'] + include: + - ref: ${{ github.sha }} + save_name: 'Baseline' + preset: 'Full' + uses: ./.github/workflows/sycl-linux-run-tests.yml + secrets: inherit + with: + name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})" + runner: ${{ matrix.runner }} + image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: ${{ matrix.backend }} + tests_selector: benchmarks + benchmark_upload_results: true + benchmark_save_name: ${{ matrix.save_name }} + benchmark_preset: ${{ matrix.preset }} + repo_ref: ${{ matrix.ref }} + toolchain_artifact: ${{ needs.build_nightly.outputs.toolchain_artifact }} + toolchain_artifact_filename: ${{ needs.build_nightly.outputs.toolchain_artifact_filename }} + toolchain_decompress_command: ${{ needs.build_nightly.outputs.toolchain_decompress_command }} + # END nightly benchmarking path + + # Benchmark framework builds and runs on PRs path: + build_pr: + name: '[PR] Build SYCL' + if: github.event_name == 'pull_request' + uses: ./.github/workflows/sycl-linux-build.yml + with: + build_ref: ${{ github.sha }} + build_cache_root: "/__w/" + build_cache_suffix: "default" + # Docker image has last nightly pre-installed and added to the PATH + build_image: "ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest" + cc: clang + cxx: clang++ + changes: ${{ needs.detect_changes.outputs.filters }} + toolchain_artifact: sycl_linux_default + + # TODO: When we have stable BMG runner(s), consider moving this job to that runner. 
+ test_benchmark_framework: + name: '[PR] Benchmark suite testing' + needs: [build_pr] + if: always() && !cancelled() && needs.build_pr.outputs.build_conclusion == 'success' + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: 'Framework test: PVC_PERF, L0, Minimal preset' + runner: '["PVC_PERF"]' + image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: 'level_zero:gpu' + tests_selector: benchmarks + benchmark_upload_results: false + benchmark_preset: 'Minimal' + benchmark_dry_run: true + benchmark_exit_on_failure: true + repo_ref: ${{ github.sha }} + toolchain_artifact: ${{ needs.build.outputs.toolchain_artifact }} + toolchain_artifact_filename: ${{ needs.build.outputs.toolchain_artifact_filename }} + toolchain_decompress_command: ${{ needs.build.outputs.toolchain_decompress_command }} + # END benchmark framework builds and runs on PRs path diff --git a/.github/workflows/sycl-windows-precommit.yml b/.github/workflows/sycl-windows-precommit.yml index ca7022c83b00..58254e849581 100644 --- a/.github/workflows/sycl-windows-precommit.yml +++ b/.github/workflows/sycl-windows-precommit.yml @@ -7,6 +7,7 @@ on: - llvmspirv_pulldown - sycl-rel-** # Do not run builds if changes are only in the following locations + # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (to run there instead) paths-ignore: - '.github/ISSUE_TEMPLATE/**' - '.github/CODEOWNERS' @@ -32,6 +33,9 @@ on: - 'unified-runtime/test/**' - 'unified-runtime/third_party/**' - 'unified-runtime/tools/**' + - 'devops/scripts/benchmarks/**' + - 'devops/actions/run-tests/benchmark/**' + - '.github/workflows/sycl-ur-perf-benchmarking.yml' permissions: read-all From 444463aa90bcd99b61a5031d5ce43af28393e68b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Fri, 24 Oct 2025 12:55:33 +0200 Subject: [PATCH 4/4] [CI][Bench] Adjust benchmarks' workflow name to include 'SYCL', as other SYCL workflows --- .github/workflows/sycl-ur-perf-benchmarking.yml | 2 +- devops/scripts/benchmarks/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index e171164e11aa..7d5b0cb74486 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -1,7 +1,7 @@ # A combined workflow for all benchmarks-related jobs for SYCL and UR. # Supports both manual triggering (dispatch) and nightly runs. # It also tests changes to benchmark scripts/framework in PR, if modified. -name: Run Benchmarks +name: SYCL Run Benchmarks on: schedule: diff --git a/devops/scripts/benchmarks/README.md b/devops/scripts/benchmarks/README.md index e7a2a1e743bb..5429f7578801 100644 --- a/devops/scripts/benchmarks/README.md +++ b/devops/scripts/benchmarks/README.md @@ -115,7 +115,7 @@ The benchmarks scripts are used in a GitHub Actions workflow, and can be automat ![compute benchmarks](workflow.png "Compute Benchmarks CI job") -To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. 
Important field is the `PR number`, which is the identifier for the Pull Request against which you want the benchmarks to run. Instead, you can specify `Commit hash` from within intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from dropdown list at the top).
+To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. An important field is the `PR number`, which identifies the Pull Request against which you want the benchmarks to run. Alternatively, you can specify a `Commit hash` from within the intel/llvm repository, or leave both empty to run the benchmarks against the branch/tag the workflow was started from (the value from the dropdown list at the top).

Once all the information is entered, click the "Run workflow" button to initiate a new workflow run. This will execute the benchmarks and then post the results as a comment on the specified Pull Request.
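
For convenience, the same run can also be dispatched from the command line with the GitHub CLI instead of the web form. The snippet below is only a sketch: the input names mirror the `workflow_dispatch` inputs defined earlier in this series, `<pr-number>` is a placeholder, and `--ref sycl` assumes the default deployment branch.

```sh
# Sketch only: input names follow the workflow_dispatch inputs above,
# <pr-number> is a placeholder, and --ref assumes the 'sycl' deployment branch.
gh workflow run sycl-ur-perf-benchmarking.yml \
  --repo intel/llvm \
  --ref sycl \
  -f preset='Minimal' \
  -f pr_no='<pr-number>' \
  -f upload_results='false' \
  -f runner='["PVC_PERF"]' \
  -f backend='level_zero:gpu'
```

Leaving `pr_no` (and `commit_hash`) empty builds from the ref the workflow was started from, matching the behaviour described above.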