@@ -1,5 +1,5 @@
 # Executes benchmarks implemented in this repository using scripts
-# for results visualization from intel/llvm (unified-runtime dir).
+# for results visualization from intel/llvm.
 name: Benchmarks

 on:
@@ -14,13 +14,9 @@
         required: false
         type: string
         default: ''
-      upload_report:
-        required: false
-        type: boolean
-        default: false

 permissions:
-  contents: read
+  contents: write
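+  # 'contents: write' is what lets the job push the updated benchmark results
+  # to the 'benchmark-results' branch (see the commit step at the end of the job).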
   pull-requests: write

 env:
@@ -38,7 +34,7 @@
       # Workspace on self-hosted runners is not cleaned automatically.
       # We have to delete the files created outside of using actions.
       - name: Cleanup self-hosted workspace
-        if: always()
+        if: false
         run: |
           ls -la ./
           rm -rf ./* || true
@@ -97,23 +93,32 @@ jobs:
       - name: Build UMF
         run: cmake --build ${{env.BUILD_DIR}} -j $(nproc)

-      # Get scripts for benchmark data visualization.
-      # Use specific tag, as the scripts or files' location may change.
-      - name: Checkout SYCL
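+      # Previous benchmark results live on the dedicated 'benchmark-results'
+      # branch of this repository; they provide the history behind the HTML
+      # dashboard and are updated by the commit step at the end of this job.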
+      - name: Checkout UMF results branch
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          ref: benchmark-results
+          path: results-repo
+
+      # Get scripts for benchmark data visualization (from SYCL repo).
+      # Use specific ref, as the scripts or files' location may change.
+      - name: Checkout benchmark scripts
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           repository: intel/llvm
-          # [BENCHMARK] fix default timeout parameter
-          # https://github.com/intel/llvm/pull/17412
-          ref: 357e9e0b253b7eba105d044e38452b3c09169f8a
-          path: sycl-repo
-          fetch-depth: 1
+          # Note: The same ref is used in docs build (for dashboard generation)!
+          #
+          # 20.03.2025
+          # branch: unify-benchmark-ci
+          ref: cae7049c78c697b3ac94f931716d9efb53addcd8
+          path: sc
+          sparse-checkout: |
+            devops/scripts/benchmarks

       - name: Install benchmarking scripts deps
         run: |
           python -m venv .venv
           source .venv/bin/activate
-          pip install -r ${{github.workspace}}/sycl-repo/unified-runtime/third_party/benchmark_requirements.txt
+          pip install -r ${{github.workspace}}/sc/devops/scripts/benchmarks/requirements.txt

       - name: Set core range and GPU mask
         run: |
@@ -135,22 +140,21 @@ jobs:

       - name: Run UMF benchmarks
         id: benchmarks
-        working-directory: ${{env.BUILD_DIR}}
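+        # Note on the flags below (assumed semantics of the intel/llvm benchmark
+        # scripts): '--results-dir' points at the checked-out results-repo,
+        # '--output-markdown' writes benchmark_results.md used for the PR comment,
+        # and '--output-html remote' builds the dashboard variant that loads its
+        # data (data.json) separately.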
         run: >
-          source ${{github.workspace}}/.venv/bin/activate &&
-          taskset -c ${{ env.CORES }} ${{ github.workspace }}/sycl-repo/unified-runtime/scripts/benchmarks/main.py
+          source .venv/bin/activate &&
+          taskset -c ${{ env.CORES }} ./sc/devops/scripts/benchmarks/main.py
           ~/bench_workdir_umf
           --umf ${{env.BUILD_DIR}}
-          --compare baseline
           --timeout 3000
-          ${{ inputs.upload_report && '--output-html' || '' }}
-          ${{ inputs.pr_no != 0 && '--output-markdown' || '' }}
+          --output-html remote
+          --results-dir ${{ github.workspace }}/results-repo
+          --output-markdown
           ${{ inputs.bench_script_params }}

       # In case it failed to add a comment, we can still print the results.
       - name: Print benchmark results
-        if: ${{ always() && inputs.pr_no != 0 }}
-        run: cat ${{env.BUILD_DIR}}/benchmark_results.md
+        if: ${{ always() }}
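+        # '|| true' keeps this step green even when no markdown report was produced.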
+        run: cat ${{ github.workspace }}/benchmark_results.md || true

       - name: Add comment to PR
         uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
@@ -160,7 +164,7 @@ jobs:
            let markdown = ""
            try {
              const fs = require('fs');
-             markdown = fs.readFileSync('${{env.BUILD_DIR}}/benchmark_results.md', 'utf8');
+             markdown = fs.readFileSync('${{ github.workspace }}/benchmark_results.md', 'utf8');
            } catch(err) {
            }

@@ -177,15 +181,42 @@ jobs:
              repo: context.repo.repo,
              body: body
            })
-
-      - name: Upload HTML report
-        if: ${{ always() && inputs.upload_report }}
-        uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
-        with:
-          path: umf-repo/build/benchmark_results.html
-          key: benchmark-results-${{ github.run_id }}

-      - name: Get information about platform
-        if: ${{ always() }}
-        working-directory: ${{env.UMF_DIR}}
-        run: .github/scripts/get_system_info.sh
+      - name: Commit data.json and results directory
+        working-directory: results-repo
+        run: |
+          git config --global user.name "GitHub Actions Bot"
+          git config --global user.email "[email protected]"
+
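+          # Retry loop: commit the new result file together with a regenerated
+          # data.json; if the push is rejected (e.g. another run pushed first),
+          # reset to the remote branch, re-apply the result file, rebuild
+          # data.json and try again (up to 5 times).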
+          for attempt in {1..5}; do
+            echo "Attempt $attempt to push changes"
+
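+            # Stage the data.json presumably produced by the earlier benchmark
+            # run in place of the copy stored on the results branch.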
+            rm -f data.json
+            cp ${{ github.workspace }}/sc/devops/scripts/benchmarks/html/data.json .
+
+            git add data.json results/
+            git commit -m "Add benchmark results and data.json"
+
+            results_file=$(git diff HEAD~1 --name-only -- results/ | head -n 1)
+
+            if git push origin benchmark-results; then
+              echo "Push succeeded"
+              break
+            fi
+
+            echo "Push failed, retrying..."
+
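+            # The push was rejected: keep the result file added by this run aside,
+            # reset to the remote branch state, then put the file back before retrying.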
+            if [ -n "$results_file" ]; then
+              mv $results_file ${{ github.workspace }}/temp_$(basename $results_file)
+
+              git reset --hard origin/benchmark-results
+              git pull origin benchmark-results
+
+              new_file="results/$(basename "$results_file")"
+              mv ${{ github.workspace }}/temp_$(basename $results_file) $new_file
+            fi
+
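+            # Rebuild data.json from the results now stored in results-repo;
+            # '--dry-run' is assumed to skip re-running the benchmarks themselves.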
+            echo "Regenerating data.json"
+            (cd ${{ github.workspace }} && ${{ github.workspace }}/sc/devops/scripts/benchmarks/main.py ~/bench_workdir_umf --dry-run --results-dir ${{ github.workspace }}/results-repo --output-html remote)
+
+          done