GitHub Repository: PojavLauncherTeam/angle
Path: blob/main_old/scripts/process_angle_perf_results.py
#!/usr/bin/env vpython
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# process_angle_perf_results.py:
#   Perf result merging and upload. Adapted from the Chromium script:
#   https://chromium.googlesource.com/chromium/src/+/main/tools/perf/process_perf_results.py

from __future__ import print_function

import argparse
import collections
import json
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import uuid

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

d = os.path.dirname
ANGLE_DIR = d(d(os.path.realpath(__file__)))
sys.path.append(os.path.join(ANGLE_DIR, 'tools', 'perf'))
import cross_device_test_config

from core import path_util

path_util.AddTelemetryToPath()
from core import upload_results_to_perf_dashboard
from core import bot_platforms
from core import results_merger

path_util.AddAndroidPylibToPath()
try:
    from pylib.utils import logdog_helper
except ImportError:
    pass

path_util.AddTracingToPath()
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos

RESULTS_URL = 'https://chromeperf.appspot.com'
JSON_CONTENT_TYPE = 'application/json'
MACHINE_GROUP = 'ANGLE'
BUILD_URL = 'https://ci.chromium.org/ui/p/angle/builders/ci/%s/%d'
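# For reference, BUILD_URL is filled in with the builder name and build number
# taken from build_properties; a hypothetical example (names are illustrative):
#   BUILD_URL % ('linux-builder', 1234)
#   -> 'https://ci.chromium.org/ui/p/angle/builders/ci/linux-builder/1234'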


def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
                         output_json_file):
    """Upload the contents of result JSON(s) to the perf dashboard."""
    args = [
        '--buildername',
        build_properties['buildername'],
        '--buildnumber',
        build_properties['buildnumber'],
        '--name',
        name,
        '--configuration-name',
        configuration_name,
        '--results-file',
        json_to_upload,
        '--results-url',
        RESULTS_URL,
        '--output-json-file',
        output_json_file,
        '--perf-dashboard-machine-group',
        MACHINE_GROUP,
        '--got-angle-revision',
        build_properties['got_angle_revision'],
        '--send-as-histograms',
        '--project',
        'angle',
    ]

    if build_properties.get('git_revision'):
        args.append('--git-revision')
        args.append(build_properties['git_revision'])

    # TODO(crbug.com/1072729): log this in top level
    logging.info('upload_results_to_perf_dashboard: %s.' % args)

    return upload_results_to_perf_dashboard.main(args)
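
# Note: for the upload above to work, build_properties must carry at least
# 'buildername', 'buildnumber' and 'got_angle_revision' (and optionally
# 'git_revision'), since those keys are read directly. A minimal, purely
# illustrative value:
#   build_properties = {
#       'buildername': 'linux-builder',
#       'buildnumber': 1234,
#       'got_angle_revision': 'deadbeef0123456789',
#   }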


def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
    """Merges the contents of one or more results JSONs.

    Args:
      output_json: A path to a JSON file to which the merged results should be
        written.
      jsons_to_merge: A list of JSON files that should be merged.
      extra_links: a (key, value) map in which each key is a human-readable string
        describing the data and each value is the logdog URL that contains the data.
      test_cross_device: whether the results come from a cross-device run.
    """
    begin_time = time.time()
    merged_results = results_merger.merge_test_results(jsons_to_merge, test_cross_device)

    # Only append the perf results links if present.
    if extra_links:
        merged_results['links'] = extra_links

    with open(output_json, 'w') as f:
        json.dump(merged_results, f)

    end_time = time.time()
    print_duration('Merging json test results', begin_time, end_time)
    return 0
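
# For illustration (values are hypothetical), the merged output is the standard
# json-test-results format plus a top-level 'links' field:
#   {'version': 3, 'tests': {...}, ...,
#    'links': {'Benchmarks logs': 'https://logs.chromium.org/...'}}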


def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
    """Checks the test_results.json under each folder:

    1. mark the benchmark 'enabled' if test results are found
    2. add the json content to a list for non-ref.
    """
    begin_time = time.time()
    benchmark_enabled_map = {}
    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            # Obtain the test name we are running
            is_ref = '.reference' in benchmark_name
            enabled = True
            try:
                with open(os.path.join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # Output is null, meaning the test didn't produce any results.
                        # Output an error and continue loading the rest of the
                        # test results.
                        logging.warning('No results produced for %s, skipping upload' % directory)
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't have written json results but
                        # if they are executing then they are enabled and will generate
                        # chartjson results.
                        if not bool(json_results.get('tests')):
                            enabled = False
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build.
                        test_results_list.append(json_results)
            except IOError as e:
                # TODO(crbug.com/936602): Figure out how to surface these errors. Should
                # we have a non-zero exit code if we error out?
                logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
                continue
            if not enabled:
                # We don't upload disabled benchmarks or tests that are run
                # as a smoke test.
                logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
                continue
            benchmark_enabled_map[benchmark_name] = True

    end_time = time.time()
    print_duration('Analyzing perf json test results', begin_time, end_time)
    return benchmark_enabled_map
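
# For context, a hypothetical, heavily trimmed version-3 test_results.json that
# would mark a benchmark as enabled looks like (names are illustrative):
#   {
#       'version': 3,
#       'tests': {'trace_perf_tests': {'some_story': {'actual': 'PASS', 'shard': 0}}}
#   }
# An empty 'tests' dict means the benchmark ran no tests on that shard.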


def _generate_unique_logdog_filename(name_prefix):
    return name_prefix + '_' + str(uuid.uuid4())


def _handle_perf_logs(benchmark_directory_map, extra_links):
    """Upload benchmark logs to logdog and add a page entry for them."""
    begin_time = time.time()
    benchmark_logs_links = collections.defaultdict(list)

    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
            if os.path.exists(benchmark_log_file):
                with open(benchmark_log_file) as f:
                    uploaded_link = logdog_helper.text(
                        name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
                    benchmark_logs_links[benchmark_name].append(uploaded_link)

    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(benchmark_logs_links, sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks logs'] = logdog_stream
    end_time = time.time()
    print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
    begin_time = time.time()
    with open(benchmarks_shard_map_file) as f:
        benchmarks_shard_data = f.read()
        logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
        logdog_stream = logdog_helper.text(
            logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
        extra_links['Benchmarks shard map'] = logdog_stream
    end_time = time.time()
    print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
    return os.path.basename(directory).replace(" benchmark", "")


def _scan_output_dir(task_output_dir):
    benchmark_directory_map = {}
    benchmarks_shard_map_file = None

    directory_list = [
        f for f in os.listdir(task_output_dir)
        if not os.path.isfile(os.path.join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        for f in os.listdir(os.path.join(task_output_dir, directory)):
            path = os.path.join(task_output_dir, directory, f)
            if os.path.isdir(path):
                benchmark_directory_list.append(path)
            elif path.endswith('benchmarks_shard_map.json'):
                benchmarks_shard_map_file = path
    # Now create a map of benchmark name to the list of directories
    # the benchmark results were written to.
    for directory in benchmark_directory_list:
        benchmark_name = _get_benchmark_name(directory)
        if benchmark_name in benchmark_directory_map:
            benchmark_directory_map[benchmark_name].append(directory)
        else:
            benchmark_directory_map[benchmark_name] = [directory]

    return benchmark_directory_map, benchmarks_shard_map_file
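
# For orientation, a hypothetical task_output_dir handled by _scan_output_dir
# might look like this (shard and benchmark names are illustrative):
#   task_output_dir/
#     0/
#       trace_perf_tests benchmark/
#         perf_results.json
#         test_results.json
#         benchmark_log.txt
#       benchmarks_shard_map.json
#     1/
#       ...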


def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
    """Process perf results.

    Consists of merging the json-test-format output, uploading the perf test
    output (histograms), and storing the benchmark logs in logdog.

    Each directory in the task_output_dir represents one benchmark
    that was run. Within this directory, there is a subdirectory with the name
    of the benchmark that was run. In that subdirectory, there is a
    perf_results.json file containing the performance results in histogram
    format and a test_results.json file containing the json test results for the
    benchmark.

    Returns:
      (return_code, benchmark_upload_result_map):
        return_code is 0 if the whole operation is successful, non-zero otherwise.
        benchmark_upload_result_map: a dictionary describing which benchmarks
          were successfully uploaded.
    """
    handle_perf = not lightweight or not skip_perf
    handle_non_perf = not lightweight or skip_perf
    logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
                 (lightweight, handle_perf, handle_non_perf))
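
    # To spell out the flag logic above (derived directly from the two
    # expressions, not from external docs):
    #   lightweight=False                 -> handle_perf=True,  handle_non_perf=True
    #   lightweight=True, skip_perf=False -> handle_perf=True,  handle_non_perf=False
    #   lightweight=True, skip_perf=True  -> handle_perf=False, handle_non_perf=True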

    begin_time = time.time()
    return_code = 0
    benchmark_upload_result_map = {}

    benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)

    test_results_list = []
    extra_links = {}

    if handle_non_perf:
        # First, upload benchmarks shard map to logdog and add a page
        # entry for it in extra_links.
        if benchmarks_shard_map_file:
            _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)

        # Second, upload all the benchmark logs to logdog and add a page entry for
        # those links in extra_links.
        _handle_perf_logs(benchmark_directory_map, extra_links)

    # Then try to obtain the list of json test results to merge
    # and determine the status of each benchmark.
    benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
                                                           test_results_list)

    build_properties_map = json.loads(build_properties)
    if not configuration_name:
        # We are deprecating perf-id. See crbug.com/817823.
        configuration_name = build_properties_map['buildername']

    _update_perf_results_for_calibration(benchmarks_shard_map_file, benchmark_enabled_map,
                                         benchmark_directory_map, configuration_name)
    if not smoke_test_mode and handle_perf:
        try:
            return_code, benchmark_upload_result_map = _handle_perf_results(
                benchmark_enabled_map, benchmark_directory_map, configuration_name,
                build_properties_map, extra_links, output_results_dir)
        except Exception:
            logging.exception('Error handling perf results jsons')
            return_code = 1

    if handle_non_perf:
        # Finally, merge all test results json, add the extra links and write out to
        # the output location.
        try:
            _merge_json_output(output_json, test_results_list, extra_links,
                               configuration_name in cross_device_test_config.TARGET_DEVICES)
        except Exception:
            logging.exception('Error handling test results jsons.')

    end_time = time.time()
    print_duration('Total process_perf_results', begin_time, end_time)
    return return_code, benchmark_upload_result_map


def _merge_histogram_results(histogram_lists):
    merged_results = []
    for histogram_list in histogram_lists:
        merged_results += histogram_list

    return merged_results


def _load_histogram_set_from_dict(data):
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(data)
    return histograms


def _add_build_info(results, benchmark_name, build_properties):
    histograms = _load_histogram_set_from_dict(results)

    common_diagnostics = {
        reserved_infos.MASTERS:
            build_properties['builder_group'],
        reserved_infos.BOTS:
            build_properties['buildername'],
        reserved_infos.POINT_ID:
            build_properties['angle_commit_pos'],
        reserved_infos.BENCHMARKS:
            benchmark_name,
        reserved_infos.ANGLE_REVISIONS:
            build_properties['got_angle_revision'],
        reserved_infos.BUILD_URLS:
            BUILD_URL % (build_properties['buildername'], build_properties['buildnumber']),
    }

    for k, v in common_diagnostics.items():
        histograms.AddSharedDiagnosticToAllHistograms(k.name, generic_set.GenericSet([v]))

    return histograms.AsDicts()
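
# In addition to the keys used by _upload_perf_results, this step reads
# 'builder_group' and 'angle_commit_pos' from build_properties. A trimmed,
# hypothetical example of those extra fields:
#   {'builder_group': 'angle.perf', 'angle_commit_pos': 45678, ...}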


def _merge_perf_results(benchmark_name, results_filename, directories, build_properties):
    begin_time = time.time()
    collected_results = []
    for directory in directories:
        filename = os.path.join(directory, 'perf_results.json')
        try:
            with open(filename) as pf:
                collected_results.append(json.load(pf))
        except IOError as e:
            # TODO(crbug.com/936602): Figure out how to surface these errors. Should
            # we have a non-zero exit code if we error out?
            logging.error('Failed to obtain perf results from %s: %s', directory, e)
    if not collected_results:
        logging.error('Failed to obtain any perf results from %s.', benchmark_name)
        return

    # Results from multiple shards are assumed to be histogram sets.
    # Non-telemetry benchmarks only ever run on one shard.
    assert isinstance(collected_results[0], list)
    merged_results = _merge_histogram_results(collected_results)

    # Write additional histogram build info.
    merged_results = _add_build_info(merged_results, benchmark_name, build_properties)

    with open(results_filename, 'w') as rf:
        json.dump(merged_results, rf)

    end_time = time.time()
    print_duration(('%s results merging' % (benchmark_name)), begin_time, end_time)


def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
                       output_json_file):
    tmpfile_dir = tempfile.mkdtemp()
    try:
        upload_begin_time = time.time()
        # There are potentially multiple directories with results; re-write and
        # merge them if necessary.
        results_filename = None
        if len(directories) > 1:
            merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir), benchmark_name)
            if not os.path.exists(merge_perf_dir):
                os.makedirs(merge_perf_dir)
            results_filename = os.path.join(merge_perf_dir, 'merged_perf_results.json')
            _merge_perf_results(benchmark_name, results_filename, directories, build_properties)
        else:
            # It was only written to one shard; use that shard's data.
            results_filename = os.path.join(directories[0], 'perf_results.json')

        results_size_in_mib = os.path.getsize(results_filename) / (2**20)
        logging.info('Uploading perf results from %s benchmark (size %s MiB)' %
                     (benchmark_name, results_size_in_mib))
        with open(output_json_file, 'w') as oj:
            upload_return_code = _upload_perf_results(results_filename, benchmark_name,
                                                      configuration_name, build_properties, oj)
            upload_end_time = time.time()
            print_duration(('%s upload time' % (benchmark_name)), upload_begin_time,
                           upload_end_time)
            return (benchmark_name, upload_return_code == 0)
    finally:
        shutil.rmtree(tmpfile_dir)


def _upload_individual_benchmark(params):
    try:
        return _upload_individual(*params)
    except Exception:
        benchmark_name = params[0]
        upload_succeed = False
        logging.exception('Error uploading perf result of %s' % benchmark_name)
        return benchmark_name, upload_succeed


def _GetCpuCount(log=True):
    try:
        cpu_count = multiprocessing.cpu_count()
        if sys.platform == 'win32':
            # TODO(crbug.com/1190269) - we can't use more than 56
            # cores on Windows or Python3 may hang.
            cpu_count = min(cpu_count, 56)
        return cpu_count
    except NotImplementedError:
        if log:
            logging.warning('Failed to get a CPU count for this bot. See crbug.com/947035.')
        # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
        # only have 4 cores. Once we move to all-linux, this can be increased or
        # we can even delete this whole function and use multiprocessing.cpu_count()
        # directly.
        return 4


def _load_shard_id_from_test_results(directory):
    shard_id = None
    test_json_path = os.path.join(directory, 'test_results.json')
    try:
        with open(test_json_path) as f:
            test_json = json.load(f)
            all_results = test_json['tests']
            for _, benchmark_results in all_results.items():
                for _, measurement_result in benchmark_results.items():
                    shard_id = measurement_result['shard']
                    break
    except IOError as e:
        logging.error('Failed to open test_results.json from %s: %s', test_json_path, e)
    except KeyError as e:
        logging.error('Failed to locate results in test_results.json: %s', e)
    return shard_id


def _find_device_id_by_shard_id(benchmarks_shard_map_file, shard_id):
    device_id = None
    try:
        with open(benchmarks_shard_map_file) as f:
            shard_map_json = json.load(f)
            device_id = shard_map_json['extra_infos']['bot #%s' % shard_id]
    except KeyError as e:
        logging.error('Failed to locate device name in shard map: %s', e)
    return device_id
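
# The lookup above expects the shard map to carry an 'extra_infos' section keyed
# by shard; a hypothetical fragment of benchmarks_shard_map.json:
#   {'extra_infos': {'bot #0': 'device-a', 'bot #1': 'device-b'}, ...}
# (Device names here are purely illustrative.)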


def _update_perf_json_with_summary_on_device_id(directory, device_id):
    perf_json_path = os.path.join(directory, 'perf_results.json')
    try:
        with open(perf_json_path, 'r') as f:
            perf_json = json.load(f)
    except IOError as e:
        logging.error('Failed to open perf_results.json from %s: %s', perf_json_path, e)
        return
    summary_key_guid = str(uuid.uuid4())
    summary_key_generic_set = {
        'values': ['device_id'],
        'guid': summary_key_guid,
        'type': 'GenericSet'
    }
    perf_json.insert(0, summary_key_generic_set)
    logging.info('Inserted summary key generic set for perf result in %s: %s', directory,
                 summary_key_generic_set)
    stories_guids = set()
    for entry in perf_json:
        if 'diagnostics' in entry:
            entry['diagnostics']['summaryKeys'] = summary_key_guid
            stories_guids.add(entry['diagnostics']['stories'])
    for entry in perf_json:
        if 'guid' in entry and entry['guid'] in stories_guids:
            entry['values'].append(device_id)
    try:
        with open(perf_json_path, 'w') as f:
            json.dump(perf_json, f)
    except IOError as e:
        logging.error('Failed to write perf_results.json to %s: %s', perf_json_path, e)
        return
    logging.info('Finished adding device id %s in perf result.', device_id)


def _should_add_device_id_in_perf_result(builder_name):
    # Always add the device id on calibration builders.
    # For testing purposes, also include the FYI builder for faster turnaround,
    # because calibration builders run every 24 hours.
    return (any(builder_name == p.name for p in bot_platforms.CALIBRATION_PLATFORMS) or
            builder_name == 'android-pixel2-perf-fyi')


def _update_perf_results_for_calibration(benchmarks_shard_map_file, benchmark_enabled_map,
                                         benchmark_directory_map, configuration_name):
    if not _should_add_device_id_in_perf_result(configuration_name):
        return
    logging.info('Updating perf results for %s.', configuration_name)
    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        for directory in directories:
            shard_id = _load_shard_id_from_test_results(directory)
            device_id = _find_device_id_by_shard_id(benchmarks_shard_map_file, shard_id)
            _update_perf_json_with_summary_on_device_id(directory, device_id)


def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
                         build_properties, extra_links, output_results_dir):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the links to
    |extra_links|.

    Returns:
      (return_code, benchmark_upload_result_map)
        return_code is 0 if the upload to the perf dashboard succeeded, 1
          otherwise.
        benchmark_upload_result_map is a dictionary describing which benchmarks
          were successfully uploaded.
    """
    begin_time = time.time()
    # Upload all eligible benchmarks to the perf dashboard.
    results_dict = {}

    invocations = []
    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        # Create a place to write the perf results that will be written out to
        # logdog.
        output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
        results_dict[benchmark_name] = output_json_file
        # TODO(crbug.com/1072729): pass final arguments instead of build properties
        # and configuration_name
        invocations.append(
            (benchmark_name, directories, configuration_name, build_properties, output_json_file))

    # Kick off the uploads in multiple processes.
    # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
    # to 2 processes to avoid this error. Uncomment the following code once
    # the problem is fixed on the dashboard side.
    # pool = multiprocessing.Pool(_GetCpuCount())
    pool = multiprocessing.Pool(2)
    upload_result_timeout = False
    try:
        async_result = pool.map_async(_upload_individual_benchmark, invocations)
        # TODO(crbug.com/947035): What timeout is reasonable?
        results = async_result.get(timeout=4000)
    except multiprocessing.TimeoutError:
        upload_result_timeout = True
        logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
        results = []
        for benchmark_name in benchmark_directory_map:
            results.append((benchmark_name, False))
    finally:
        pool.terminate()

    # Keep a mapping of benchmarks to their upload results.
    benchmark_upload_result_map = {}
    for r in results:
        benchmark_upload_result_map[r[0]] = r[1]

    logdog_dict = {}
    upload_failures_counter = 0
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    for benchmark_name, output_file in results_dict.items():
        upload_succeed = benchmark_upload_result_map[benchmark_name]
        if not upload_succeed:
            upload_failures_counter += 1
        is_reference = '.reference' in benchmark_name
        _write_perf_data_to_logfile(
            benchmark_name,
            output_file,
            configuration_name,
            build_properties,
            logdog_dict,
            is_reference,
            upload_failure=not upload_succeed)

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_failures_counter > 0:
        logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
    extra_links[logdog_label] = logdog_stream
    end_time = time.time()
    print_duration('Uploading results to perf dashboard', begin_time, end_time)
    if upload_result_timeout or upload_failures_counter > 0:
        return 1, benchmark_upload_result_map
    return 0, benchmark_upload_result_map


def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
                                logdog_dict, is_ref, upload_failure):
    viewer_url = None
    # logdog file to write perf results to
    if os.path.exists(output_file):
        results = None
        with open(output_file) as f:
            try:
                results = json.load(f)
            except ValueError:
                logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
        if results:
            try:
                json_fname = _generate_unique_logdog_filename(benchmark_name)
                output_json_file = logdog_helper.open_text(json_fname)
                json.dump(results, output_json_file, indent=4, separators=(',', ': '))
            except ValueError as e:
                logging.error('ValueError: "%s" while dumping output to logdog' % e)
            finally:
                output_json_file.close()
            viewer_url = output_json_file.get_viewer_url()
    else:
        logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)

    base_benchmark_name = benchmark_name.replace('.reference', '')

    if base_benchmark_name not in logdog_dict:
        logdog_dict[base_benchmark_name] = {}

    # Add links for the perf results and the dashboard url to
    # the logs section of buildbot.
    if is_ref:
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
    else:
        # TODO(jmadill): Figure out if we can get a dashboard URL here. http://anglebug.com/6090
        # logdog_dict[base_benchmark_name]['dashboard_url'] = (
        #     upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
        #                                                      RESULTS_URL,
        #                                                      build_properties['got_revision_cp'],
        #                                                      _GetMachineGroup(build_properties)))
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['upload_failed'] = 'True'
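
# After this step, the logdog_dict entry for a single benchmark might look like
# the following (URL and failure state are illustrative):
#   {'trace_perf_tests': {'perf_results': 'https://logs.chromium.org/...',
#                         'upload_failed': 'True'}}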


def print_duration(step, start, end):
    logging.info('Duration of %s: %d seconds' % (step, end - start))


def main():
    """See collect_task.collect_task for more on the merge script API."""
    logging.info(sys.argv)
    parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of the bot the tests run on.
    # For example, buildbot-test is the name of the android-go-perf bot.
    # configuration-name and results-url are set in the json file
    # tools/perf/core/chromium.perf.fyi.extras.json, which is going away.
    parser.add_argument('--configuration-name', help=argparse.SUPPRESS)

    parser.add_argument('--build-properties', help=argparse.SUPPRESS)
    parser.add_argument('--summary-json', help=argparse.SUPPRESS)
    parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
    parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument(
        '--skip-perf',
        action='store_true',
        help='In lightweight mode, using --skip-perf will skip the performance'
        ' data handling.')
    parser.add_argument(
        '--lightweight',
        action='store_true',
        help='Choose the lightweight mode in which the perf result handling'
        ' is performed on a separate VM.')
    parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
    parser.add_argument(
        '--smoke-test-mode',
        action='store_true',
        help='This test should be run in smoke test mode'
        ' meaning it does not upload to the perf dashboard')

    args = parser.parse_args()

    output_results_dir = tempfile.mkdtemp('outputresults')
    try:
        return_code, _ = process_perf_results(args.output_json, args.configuration_name,
                                              args.build_properties, args.task_output_dir,
                                              args.smoke_test_mode, output_results_dir,
                                              args.lightweight, args.skip_perf)
        return return_code
    finally:
        shutil.rmtree(output_results_dir)


if __name__ == '__main__':
    sys.exit(main())
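
# A hypothetical invocation, matching the flags registered above (paths and
# values are illustrative; the real arguments come from the swarming merge
# script machinery):
#   vpython process_angle_perf_results.py \
#       --configuration-name linux-perf \
#       --build-properties '{"buildername": "linux-perf", "buildnumber": 1234, ...}' \
#       --task-output-dir /tmp/task_output \
#       -o /tmp/output.json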