Commit b0671cb
Add a script to run various benchmarks and send the results to LNT.

LNT is both a server and a set of scripts for benchmarking LLVM. I don't think it makes sense to use those scripts for lld, since our benchmarks are quite different. The server, on the other hand, is very general and seems to work well for tracking arbitrary quantities.

This patch adds a script to lld that can be used to run various benchmarks and send the results to LNT. The benchmarks are assumed to each be a response file in a subdirectory. Each subdirectory can contain multiple response files; that can be used to have, for example, a plain response.txt and a response-icf.txt. The name of each benchmark is the combination of the directory name and the "flavor": firefox-gc, chromium-icf, etc.

For this first version, the script uses perf and collects all the metrics that a plain "perf stat" prints. The script can then be used by a developer to test a patch or by a bot to keep track of lld's performance.

git-svn-id: https://llvm.org/svn/llvm-project/lld/trunk@318158 91177308-0d34-0410-b5e6-96231b3b80d8
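For illustration, a benchmark directory for this script could look as follows. The benchmark and file names here are made up; the only fixed conventions, taken from the script below, are that ld.lld must sit one level above each benchmark subdirectory and that response files match */response*.txt:

    benchmarks/
        ld.lld
        firefox/
            response.txt
            response-gc.txt
        chromium/
            response.txt
            response-icf.txt

This layout would yield the benchmarks firefox, firefox-gc, chromium and chromium-icf. A sample invocation (machine name and revision are placeholders):

    python benchmark.py --machine=my-machine --revision=r318158 benchmarks

--runs sets the number of measured runs after the warm-up run (default 10), and --wrapper can prepend a comma-separated wrapper command to the perf invocation.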
1 parent f6b4f8d commit b0671cb

2 files changed, +174 -0 lines

utils/benchmark.py

@@ -0,0 +1,135 @@
#!/usr/bin/env python
#
#                     The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
# ==------------------------------------------------------------------------==#

import os
import glob
import re
import subprocess
import json
import datetime
import argparse
import urllib
import urllib2

parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The lnt server url to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()

class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant
    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e

def combinePerfRun(acc, d):
    # Accumulate each metric of one run into a per-metric list.
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a

def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        # Remove the previous output so every measured run links from scratch.
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version' : 2,
        'machine' : { 'name' : args.machine },
        'run' : {
            'start_time' : start,
            'end_time' : end,
            'llvm_project_revision': args.revision
        },
        'tests' : tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

def submitToServer(data):
    # LNT expects the report as an urlencoded 'input_data' form field.
    data2 = urllib.urlencode({ 'input_data' : data }).encode('ascii')
    urllib2.urlopen(urllib2.Request(args.url, data2))

os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)
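As a sketch of what parsePerf consumes, here is a trimmed, hypothetical "perf stat" report in the layout perf used around this time (the exact counters and columns vary with the perf version and hardware):

    sample = b'''
           1234.567890      task-clock (msec)         #    0.998 CPUs utilized
                 1,234      page-faults               #    0.001 M/sec
        12,345,678,901      cycles                    #    3.500 GHz

           1.237654321 seconds time elapsed
    '''
    print(parsePerf(sample))
    # Roughly: {'task-clock': 1234.56789, 'page-faults': 1234,
    #           'cycles': 12345678901, 'seconds-elapsed': 1.237654321}

Each counter line is split at '#'; in the left half, the first whitespace-separated token is the value and the second is the metric name. parsePerfNum strips the thousands separators, so '1,234' parses as the int 1234 while decimal values become floats.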

utils/link.yaml

@@ -0,0 +1,39 @@
format_version: '2'
name: link
run_fields:
  - name: llvm_project_revision
    order: true
machine_fields:
  - name: hardware
  - name: os
metrics:
  - name: branch-misses
    bigger_is_better: false
    type: Real
  - name: stalled-cycles-frontend
    bigger_is_better: false
    type: Real
  - name: branches
    bigger_is_better: false
    type: Real
  - name: context-switches
    bigger_is_better: false
    type: Real
  - name: cpu-migrations
    bigger_is_better: false
    type: Real
  - name: cycles
    bigger_is_better: false
    type: Real
  - name: instructions
    bigger_is_better: false
    type: Real
  - name: seconds-elapsed
    bigger_is_better: false
    type: Real
  - name: page-faults
    bigger_is_better: false
    type: Real
  - name: task-clock
    bigger_is_better: false
    type: Real
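For reference, the report that buildLntJson above produces (and that submitToServer posts as the input_data form field) has roughly this shape; the machine name, timestamps and numbers are placeholders, and other metrics are omitted for brevity. Each test carries one list per metric, with one entry per measured run, and each metric key must match a metric declared in this schema:

    {
        "format_version": 2,
        "machine": { "name": "my-machine" },
        "run": {
            "start_time": "2017-11-14T12:00:00",
            "end_time": "2017-11-14T12:05:00",
            "llvm_project_revision": "r318158"
        },
        "tests": [
            {
                "name": "firefox-gc",
                "cycles": [12345678901, 12345001234],
                "seconds-elapsed": [1.23, 1.24],
                "task-clock": [1234.5, 1236.7]
            }
        ]
    }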
