Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/ts/misc/summary.py
16344 views
1
#!/usr/bin/env python
2
3
import testlog_parser, sys, os, xml, glob, re
4
from table_formatter import *
5
from optparse import OptionParser
6
7
# Matches runs of digits; used to split names into alternating text/number chunks.
# FIX: raw string — "\d" in a plain string literal is an invalid escape
# (SyntaxWarning on modern CPython).
numeric_re = re.compile(r"(\d+)")
# Matches OpenCV element-type tokens such as "8UC3": depth name + channel count.
cvtype_re = re.compile(r"(8U|8S|16U|16S|32S|32F|64F)C(\d{1,3})")
# Rank of each depth token; unknown depths rank after all known ones (7).
cvtypes = { '8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6 }


def convert(text):
    """Return ``int(text)`` for all-digit strings, *text* unchanged otherwise."""
    return int(text) if text.isdigit() else text


def keyselector(a):
    """Replace each cvtype token in *a* (e.g. "8UC3") with a numeric rank.

    The rank is depth-rank + (channels - 1) * 8, padded with spaces so the
    numeric run is split out by ``numeric_re`` afterwards.
    """
    return cvtype_re.sub(
        lambda match: " " + str(cvtypes.get(match.group(1), 7) + (int(match.group(2)) - 1) * 8) + " ",
        a)


def alphanum_keyselector(key):
    """Natural-order sort key: digits compare numerically, text lexically."""
    return [convert(c) for c in numeric_re.split(keyselector(key))]
14
15
def getSetName(tset, idx, columns, short = True):
    """Return a display name for test set *tset* (a ``(filename, tests)`` pair).

    If *columns* supplies an alias for position *idx*, that alias is used.
    With ``short=True`` the alias alone is returned; otherwise the alias is
    stacked above a dashed separator and the name derived from the file name
    (".xml" stripped, underscores turned into line breaks).
    """
    alias = columns[idx] if columns and len(columns) > idx else None
    if short and alias:
        return alias
    base = tset[0].replace(".xml", "").replace("_", "\n")
    if not alias:
        return base
    # Separator is 1.5x the widest alias line, matching the header style.
    widest = len(max(alias.split("\n"), key=len))
    return alias + "\n" + "-" * int(widest * 1.5) + "\n" + base
26
27
if __name__ == "__main__":
    # Summarize one or more perf-test XML logs side by side in one table.
    if len(sys.argv) < 2:
        # FIX: was the py2-only statement `print >> sys.stderr, ...`; rewritten
        # with sys.stderr.write (the style already used below) so the script
        # parses on both Python 2 and Python 3.
        sys.stderr.write("Usage:\n" + os.path.basename(sys.argv[0]) +
                         " <log_name1>.xml [<log_name2>.xml ...]" + os.linesep)
        exit(0)

    # ---- command-line options -------------------------------------------
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html', 'markdown' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
    parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
    parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
    parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
    parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
    parser.add_option("", "--progress", action="store_true", dest="progress_mode", default=False, help="enable progress mode")
    parser.add_option("", "--regressions", dest="regressions", default=None, metavar="LIST", help="comma-separated custom regressions map: \"[r][c]#current-#reference\" (indexes of columns are 0-based, \"r\" - reverse flag, \"c\" - color flag for base data)")
    parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
    parser.add_option("", "--match", dest="match", default=None)
    parser.add_option("", "--match-replace", dest="match_replace", default="")
    # FIX: help text was cut off mid-sentence ("...performance regressions not").
    parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with performance regressions not smaller than X-FACTOR")
    parser.add_option("", "--intersect-logs", dest="intersect_logs", default=False, help="show only tests present in all log files")
    parser.add_option("", "--show_units", action="store_true", dest="show_units", help="append units into table cells")
    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)
    if options.metric not in metrix_table:
        options.metric = "gmean"
    if options.metric.endswith("%") or options.metric.endswith("$"):
        # Relative/cycle-reduction metrics already encode a comparison;
        # computing relatives of relatives makes no sense.
        options.calc_relatives = False
        options.calc_cr = False
    if options.columns:
        options.columns = [s.strip().replace("\\n", "\n") for s in options.columns.split(",")]

    if options.regressions:
        assert not options.progress_mode, 'unsupported mode'

        def parseRegressionColumn(s):
            """ Format: '[r][c]<uint>-<uint>' """
            reverse = s.startswith('r')
            if reverse:
                s = s[1:]
            addColor = s.startswith('c')
            if addColor:
                s = s[1:]
            parts = s.split('-', 1)
            link = (int(parts[0]), int(parts[1]), reverse, addColor)
            assert link[0] != link[1]
            return link

        options.regressions = [parseRegressionColumn(s) for s in options.regressions.split(',')]

    show_units = options.units if options.show_units else None

    # ---- expand wildcards and filter duplicates --------------------------
    files = []
    seen = set()
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            flist = [os.path.abspath(f) for f in glob.glob(arg)]
            flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
            # set.add returns None, so "not seen.add(x)" records x as seen
            # while keeping only first occurrences, order preserved.
            files.extend([ x for x in flist if x not in seen and not seen.add(x)])
        else:
            fname = os.path.abspath(arg)
            if fname not in seen and not seen.add(fname):
                files.append(fname)

    # ---- read all passed files ------------------------------------------
    test_sets = []
    for arg in files:
        try:
            tests = testlog_parser.parseLogFile(arg)
            if options.filter:
                expr = re.compile(options.filter)
                tests = [t for t in tests if expr.search(str(t))]
            if options.match:
                tests = [t for t in tests if t.get("status") != "notrun"]
            if tests:
                test_sets.append((os.path.basename(arg), tests))
        except IOError as err:
            sys.stderr.write("IOError reading \"" + arg + "\" - " + str(err) + os.linesep)
        except xml.parsers.expat.ExpatError as err:
            sys.stderr.write("ExpatError reading \"" + arg + "\" - " + str(err) + os.linesep)

    if not test_sets:
        sys.stderr.write("Error: no test data found" + os.linesep)
        quit()

    setsCount = len(test_sets)

    # Default regression map: compare every log against the first one
    # (or against the previous log in --progress mode, reference = -1).
    if options.regressions is None:
        reference = -1 if options.progress_mode else 0
        options.regressions = [(i, reference, False, True) for i in range(1, len(test_sets))]

    for link in options.regressions:
        (i, ref, reverse, addColor) = link
        assert i >= 0 and i < setsCount
        assert ref < setsCount

    # ---- find matches: test name -> list of cases, one slot per log -----
    test_cases = {}

    name_extractor = lambda name: str(name)
    if options.match:
        reg = re.compile(options.match)
        name_extractor = lambda name: reg.sub(options.match_replace, str(name))

    for i in range(setsCount):
        for case in test_sets[i][1]:
            name = name_extractor(case)
            if options.module:
                name = options.module + "::" + name
            if name not in test_cases:
                test_cases[name] = [None] * setsCount
            test_cases[name][i] = case

    # ---- build table -----------------------------------------------------
    getter = metrix_table[options.metric][1]
    getter_score = metrix_table["score"][1] if options.calc_score else None
    getter_p = metrix_table[options.metric + "%"][1] if options.calc_relatives else None
    getter_cr = metrix_table[options.metric + "$"][1] if options.calc_cr else None
    tbl = table('%s (%s)' % (metrix_table[options.metric][0], options.units), options.format)

    # header: one "name" column plus one value column per log
    tbl.newColumn("name", "Name of Test", align = "left", cssclass = "col_name")
    for i in range(setsCount):
        tbl.newColumn(str(i), getSetName(test_sets[i], i, options.columns, False), align = "center")

    def addHeaderColumns(suffix, description, cssclass):
        # One comparison column per regression link; column ID is
        # "<current>-<reference><suffix>" and must match the cell IDs below.
        for link in options.regressions:
            (i, ref, reverse, addColor) = link
            if reverse:
                i, ref = ref, i
            current_set = test_sets[i]
            current = getSetName(current_set, i, options.columns)
            if ref >= 0:
                reference_set = test_sets[ref]
                reference = getSetName(reference_set, ref, options.columns)
            else:
                reference = 'previous'
            tbl.newColumn(str(i) + '-' + str(ref) + suffix, '%s\nvs\n%s\n(%s)' % (current, reference, description), align='center', cssclass=cssclass)

    if options.calc_cr:
        addHeaderColumns(suffix='$', description='cycles reduction', cssclass='col_cr')
    if options.calc_relatives:
        addHeaderColumns(suffix='%', description='x-factor', cssclass='col_rel')
    if options.calc_score:
        addHeaderColumns(suffix='S', description='score', cssclass='col_name')

    # ---- rows ------------------------------------------------------------
    # needNewRow stays False until a row receives at least one real value,
    # so empty/"notrun" rows get overwritten unless --show-all is given.
    prevGroupName = None
    needNewRow = True
    lastRow = None
    for name in sorted(test_cases.keys(), key=alphanum_keyselector):
        cases = test_cases[name]
        if needNewRow:
            lastRow = tbl.newRow()
            if not options.showall:
                needNewRow = False
        tbl.newCell("name", name)

        # mark the first row of each group of tests sharing a short name
        groupName = next(c for c in cases if c).shortName()
        if groupName != prevGroupName:
            prop = lastRow.props.get("cssclass", "")
            if "firstingroup" not in prop:
                lastRow.props["cssclass"] = prop + " firstingroup"
            prevGroupName = groupName

        # absolute value cells, one per log
        for i in range(setsCount):
            case = cases[i]
            if case is None:
                if options.intersect_logs:
                    # row is incomplete: drop it and skip the comparisons
                    needNewRow = False
                    break

                tbl.newCell(str(i), "-")
            else:
                status = case.get("status")
                if status != "run":
                    tbl.newCell(str(i), status, color="red")
                else:
                    val = getter(case, cases[0], options.units)
                    if val:
                        needNewRow = True
                    tbl.newCell(str(i), formatValue(val, options.metric, show_units), val)

        # comparison cells (x-factor / cycles reduction / score)
        if needNewRow:
            for link in options.regressions:
                (i, reference, reverse, addColor) = link
                if reverse:
                    i, reference = reference, i
                tblCellID = str(i) + '-' + str(reference)
                case = cases[i]
                if case is None:
                    if options.calc_relatives:
                        tbl.newCell(tblCellID + "%", "-")
                    if options.calc_cr:
                        tbl.newCell(tblCellID + "$", "-")
                    if options.calc_score:
                        # FIX: suffix was "$" (the cycles-reduction column ID);
                        # the score column is created with suffix "S" above and
                        # every other score cell uses "S".
                        tbl.newCell(tblCellID + "S", "-")
                else:
                    status = case.get("status")
                    if status != "run":
                        tbl.newCell(str(i), status, color="red")
                        if status != "notrun":
                            # a failed (not merely skipped) test is worth showing
                            needNewRow = True
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", "-", color="red")
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", "-", color="red")
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", "-", color="red")
                    else:
                        val = getter(case, cases[0], options.units)
                        def getRegression(fn):
                            # Compare against the fixed reference column, or in
                            # progress mode (reference < 0) against the nearest
                            # earlier column that actually ran.
                            if fn and val:
                                for j in reversed(range(i)) if reference < 0 else [reference]:
                                    r = cases[j]
                                    if r is not None and r.get("status") == 'run':
                                        return fn(case, r, options.units)
                        valp = getRegression(getter_p) if options.calc_relatives or options.progress_mode else None
                        valcr = getRegression(getter_cr) if options.calc_cr else None
                        val_score = getRegression(getter_score) if options.calc_score else None
                        # >5% speedup green, >5% slowdown red, otherwise neutral
                        if not valp:
                            color = None
                        elif valp > 1.05:
                            color = 'green'
                        elif valp < 0.95:
                            color = 'red'
                        else:
                            color = None
                        if addColor:
                            if not reverse:
                                tbl.newCell(str(i), formatValue(val, options.metric, show_units), val, color=color)
                            else:
                                r = cases[reference]
                                if r is not None and r.get("status") == 'run':
                                    val = getter(r, cases[0], options.units)
                                    tbl.newCell(str(reference), formatValue(val, options.metric, show_units), val, color=color)
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", formatValue(valp, "%"), valp, color=color, bold=color)
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", formatValue(valcr, "$"), valcr, color=color, bold=color)
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)

    if not needNewRow:
        # last row was allocated but never filled with a real value
        tbl.trimLastRow()

    if options.regressionsOnly:
        # Keep only rows whose trailing comparison cells contain at least one
        # value below the given X-FACTOR (for-else: pop when no cell breaks).
        for r in reversed(range(len(tbl.rows))):
            for i in range(1, len(options.regressions) + 1):
                val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value
                if val is not None and val < float(options.regressionsOnly):
                    break
            else:
                tbl.rows.pop(r)

    # ---- output table ----------------------------------------------------
    if options.generateHtml:
        if options.format == "moinwiki":
            tbl.htmlPrintTable(sys.stdout, True)
        else:
            htmlPrintHeader(sys.stdout, "Summary report for %s tests from %s test logs" % (len(test_cases), setsCount))
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
    else:
        tbl.consolePrintTable(sys.stdout)

    if options.regressionsOnly:
        # exit status = number of remaining regressions (0 means "clean")
        sys.exit(len(tbl.rows))