GitHub Repository: PojavLauncherTeam/angle
Path: blob/main_old/src/tests/restricted_traces/restricted_trace_gold_tests.py
#! /usr/bin/env vpython
#
# [VPYTHON:BEGIN]
# wheel: <
#   name: "infra/python/wheels/psutil/${vpython_platform}"
#   version: "version:5.2.2"
# >
# wheel: <
#   name: "infra/python/wheels/six-py2_py3"
#   version: "version:1.10.0"
# >
# [VPYTHON:END]
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# restricted_trace_gold_tests.py:
#   Uses Skia Gold (https://skia.org/dev/testing/skiagold) to run pixel tests with ANGLE traces.
#
#   Requires vpython to run standalone. Run with --help for usage instructions.
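#
# Example invocation (a sketch; the flag set is defined in main() below, and the
# trace list comes from restricted_traces.json, so no trace names are passed):
#   vpython restricted_trace_gold_tests.py --test-suite angle_perftests --local-pixel-tests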

import argparse
import contextlib
import fnmatch
import json
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import traceback

# Add //src/testing into sys.path for importing xvfb and test_env, and
# //src/testing/scripts for importing common.
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
sys.path.insert(0, d(THIS_DIR))

from skia_gold import angle_skia_gold_properties
from skia_gold import angle_skia_gold_session_manager

ANGLE_SRC_DIR = d(d(d(THIS_DIR)))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'scripts'))
# Handle the Chromium-relative directory as well. As long as one directory
# is valid, Python is happy.
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'scripts'))

import common
import test_env
import xvfb

def IsWindows():
    return sys.platform == 'cygwin' or sys.platform.startswith('win')


DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_PREFIX = 'TracePerfTest.Run/vulkan_'
DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_'
DEFAULT_BATCH_SIZE = 5
DEFAULT_LOG = 'info'

# Filters out stuff like: " I 72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\) '
ANDROID_BEGIN_SYSTEM_INFO = '>>ScopedMainEntryLogger'

# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'


@contextlib.contextmanager
def temporary_dir(prefix=''):
    path = tempfile.mkdtemp(prefix=prefix)
    try:
        yield path
    finally:
        logging.info("Removing temporary directory: %s" % path)
        shutil.rmtree(path)


def add_skia_gold_args(parser):
    group = parser.add_argument_group('Skia Gold Arguments')
    group.add_argument('--git-revision', help='Revision being tested.', default=None)
    group.add_argument(
        '--gerrit-issue', help='For Skia Gold integration. Gerrit issue ID.', default='')
    group.add_argument(
        '--gerrit-patchset',
        help='For Skia Gold integration. Gerrit patch set number.',
        default='')
    group.add_argument(
        '--buildbucket-id', help='For Skia Gold integration. Buildbucket build ID.', default='')
    group.add_argument(
        '--bypass-skia-gold-functionality',
        action='store_true',
        default=False,
        help='Bypass all interaction with Skia Gold, effectively disabling the '
        'image comparison portion of any tests that use Gold. Only meant to '
        'be used in case a Gold outage occurs and cannot be fixed quickly.')
    local_group = group.add_mutually_exclusive_group()
    local_group.add_argument(
        '--local-pixel-tests',
        action='store_true',
        default=None,
        help='Specifies to run the test harness in local run mode or not. When '
        'run in local mode, uploading to Gold is disabled and links to '
        'help with local debugging are output. Running in local mode also '
        'implies --no-luci-auth. If both this and --no-local-pixel-tests are '
        'left unset, the test harness will attempt to detect whether it is '
        'running on a workstation or not and set this option accordingly.')
    local_group.add_argument(
        '--no-local-pixel-tests',
        action='store_false',
        dest='local_pixel_tests',
        help='Specifies to run the test harness in non-local (bot) mode. When '
        'run in this mode, data is actually uploaded to Gold and triage links '
        'are generated. If both this and --local-pixel-tests are left unset, '
        'the test harness will attempt to detect whether it is running on a '
        'workstation or not and set this option accordingly.')
    group.add_argument(
        '--no-luci-auth',
        action='store_true',
        default=False,
        help='Don\'t use the service account provided by LUCI for '
        'authentication for Skia Gold, instead relying on gsutil to be '
        'pre-authenticated. Meant for testing locally instead of on the bots.')


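# When --xvfb is passed (headless Linux bots), the test command is wrapped in a
# virtual X server; otherwise it runs directly through test_env.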
def run_wrapper(args, cmd, env, stdoutfile=None):
    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    else:
        return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)


def to_hex(num):
    return hex(int(num))


def to_hex_or_none(num):
    return 'None' if num is None else to_hex(num)


def to_non_empty_string_or_none(val):
    return 'None' if val == '' else str(val)


def to_non_empty_string_or_none_dict(d, key):
    return 'None' if key not in d else to_non_empty_string_or_none(d[key])


def get_binary_name(binary):
    if IsWindows():
        return '.\\%s.exe' % binary
    else:
        return './%s' % binary


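# The Skia Gold "keys" identify the configuration under test (GPU vendor/device,
# OS, driver). They are gathered by running the angle_system_info_test binary
# and extracting its JSON output from the captured log.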
def get_skia_gold_keys(args, env):
    """Get all the JSON metadata that will be passed to goldctl."""
    # All values need to be strings, otherwise goldctl fails.

    # Only call this method one time
    if hasattr(get_skia_gold_keys, 'called') and get_skia_gold_keys.called:
        logging.exception('get_skia_gold_keys may only be called once')
    get_skia_gold_keys.called = True

    class Filter:
        """Extracts the system-info JSON blob from mixed test log output."""

        def __init__(self):
            self.accepting_lines = True
            self.done_accepting_lines = False
            self.android_prefix = re.compile(ANDROID_LOGGING_PREFIX)
            self.lines = []
            self.is_android = False

        def append(self, line):
            if self.done_accepting_lines:
                return
            if 'Additional test environment' in line or 'android/test_runner.py' in line:
                self.accepting_lines = False
                self.is_android = True
            if ANDROID_BEGIN_SYSTEM_INFO in line:
                self.accepting_lines = True
                return
            if not self.accepting_lines:
                return

            if self.is_android:
                line = self.android_prefix.sub('', line)

            if line[0] == '}':
                self.done_accepting_lines = True

            self.lines.append(line)

        def get(self):
            return self.lines

    with common.temporary_file() as tempfile_path:
        binary = get_binary_name('angle_system_info_test')
        if run_wrapper(args, [binary, '--vulkan', '-v'], env, tempfile_path):
            raise Exception('Error getting system info.')

        log_filter = Filter()

        with open(tempfile_path) as f:
            for line in f:
                log_filter.append(line)

        info_json = ''.join(log_filter.get())
        logging.info(info_json)
        json_data = json.loads(info_json)

    if len(json_data.get('gpus', [])) == 0 or 'activeGPUIndex' not in json_data:
        raise Exception('Error getting system info.')

    active_gpu = json_data['gpus'][json_data['activeGPUIndex']]

    angle_keys = {
        'vendor_id': to_hex_or_none(active_gpu['vendorId']),
        'device_id': to_hex_or_none(active_gpu['deviceId']),
        'model_name': to_non_empty_string_or_none_dict(active_gpu, 'machineModelVersion'),
        'manufacturer_name': to_non_empty_string_or_none_dict(active_gpu, 'machineManufacturer'),
        'os': to_non_empty_string_or_none(platform.system()),
        'os_version': to_non_empty_string_or_none(platform.version()),
        'driver_version': to_non_empty_string_or_none_dict(active_gpu, 'driverVersion'),
        'driver_vendor': to_non_empty_string_or_none_dict(active_gpu, 'driverVendor'),
    }

    return angle_keys


def output_diff_local_files(gold_session, image_name):
    """Logs the local diff image files from the given SkiaGoldSession.

    Args:
      gold_session: A skia_gold_session.SkiaGoldSession instance to pull files
          from.
      image_name: A string containing the name of the image/test that was
          compared.
    """
    given_file = gold_session.GetGivenImageLink(image_name)
    closest_file = gold_session.GetClosestImageLink(image_name)
    diff_file = gold_session.GetDiffImageLink(image_name)
    failure_message = 'Unable to retrieve link'
    logging.error('Generated image: %s', given_file or failure_message)
    logging.error('Closest image: %s', closest_file or failure_message)
    logging.error('Diff image: %s', diff_file or failure_message)


def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, gold_properties,
                                    screenshot_dir, image_name, artifacts):
    """Compares the given image using Skia Gold and uploads the result.

    No uploading is done if the test is being run in local run mode. Compares
    the given screenshot to baselines provided by Gold, raising an Exception if
    a match is not found.

    Args:
      args: Command line options.
      gold_session_manager: Skia Gold session manager.
      gold_session: Skia Gold session.
      gold_properties: Skia Gold properties.
      screenshot_dir: directory where the test stores screenshots.
      image_name: the name of the image being checked.
      artifacts: dictionary of JSON artifacts to pass to the result merger.
    """

    use_luci = not (gold_properties.local_pixel_tests or gold_properties.no_luci_auth)

    # Note: this would be better done by iterating the screenshot directory.
    png_file_name = os.path.join(screenshot_dir, DEFAULT_SCREENSHOT_PREFIX + image_name + '.png')

    if not os.path.isfile(png_file_name):
        logging.info('Screenshot not found, test skipped.')
        return SKIP

    status, error = gold_session.RunComparison(
        name=image_name, png_file=png_file_name, use_luci=use_luci)

    artifact_name = os.path.basename(png_file_name)
    artifacts[artifact_name] = [artifact_name]

    if not status:
        return PASS

    status_codes = gold_session_manager.GetSessionClass().StatusCodes
    if status == status_codes.AUTH_FAILURE:
        logging.error('Gold authentication failed with output %s', error)
    elif status == status_codes.INIT_FAILURE:
        logging.error('Gold initialization failed with output %s', error)
    elif status == status_codes.COMPARISON_FAILURE_REMOTE:
        _, triage_link = gold_session.GetTriageLinks(image_name)
        if not triage_link:
            logging.error('Failed to get triage link for %s, raw output: %s', image_name, error)
            logging.error('Reason for no triage link: %s',
                          gold_session.GetTriageLinkOmissionReason(image_name))
        elif gold_properties.IsTryjobRun():
            artifacts['triage_link_for_entire_cl'] = [triage_link]
        else:
            artifacts['gold_triage_link'] = [triage_link]
    elif status == status_codes.COMPARISON_FAILURE_LOCAL:
        logging.error('Local comparison failed. Local diff files:')
        output_diff_local_files(gold_session, image_name)
    elif status == status_codes.LOCAL_DIFF_FAILURE:
        logging.error(
            'Local comparison failed and an error occurred during diff '
            'generation: %s', error)
        # There might be some files, so try outputting them.
        logging.error('Local diff files:')
        output_diff_local_files(gold_session, image_name)
    else:
        logging.error('Given unhandled SkiaGoldSession StatusCode %s with error %s', status, error)

    return FAIL


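# Traces run in fixed-size batches (see --batch-size); after a batch, only the
# traces that failed are carried into the next retry pass. A batch is selected
# with a single gtest filter, e.g. (hypothetical trace names):
#   _get_gtest_filter_for_batch(['trex_200', 'pubg'])
#   -> '--gtest_filter=TracePerfTest.Run/vulkan_trex_200:TracePerfTest.Run/vulkan_pubg'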
def _get_batches(traces, batch_size):
    for i in range(0, len(traces), batch_size):
        yield traces[i:i + batch_size]


def _get_gtest_filter_for_batch(batch):
    expanded = ['%s%s' % (DEFAULT_TEST_PREFIX, trace) for trace in batch]
    return '--gtest_filter=%s' % ':'.join(expanded)


def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
    keys = get_skia_gold_keys(args, env)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]

        if args.isolated_script_test_filter:
            filtered = []
            for trace in traces:
                # Apply test filter if present.
                full_name = 'angle_restricted_trace_gold_tests.%s' % trace
                if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter):
                    logging.info('Skipping test %s because it does not match filter %s' %
                                 (full_name, args.isolated_script_test_filter))
                else:
                    filtered += [trace]
            traces = filtered

        batches = _get_batches(traces, args.batch_size)

        for batch in batches:
            for iteration in range(0, args.flaky_retries + 1):
                with common.temporary_file() as tempfile_path:
                    # This is how we signal early exit
                    if not batch:
                        logging.debug('All tests in batch completed.')
                        break
                    if iteration > 0:
                        logging.info('Test run failed, running retry #%d...' % iteration)

                    gtest_filter = _get_gtest_filter_for_batch(batch)
                    cmd = [
                        args.test_suite,
                        gtest_filter,
                        '--render-test-output-dir=%s' % screenshot_dir,
                        '--one-frame-only',
                        '--verbose-logging',
                    ] + extra_flags
                    batch_result = PASS if run_wrapper(args, cmd, env,
                                                       tempfile_path) == 0 else FAIL

                    next_batch = []
                    for trace in batch:
                        artifacts = {}

                        if batch_result == PASS:
                            logging.debug('upload test result: %s' % trace)
                            result = upload_test_result_to_skia_gold(args, gold_session_manager,
                                                                     gold_session, gold_properties,
                                                                     screenshot_dir, trace,
                                                                     artifacts)
                        else:
                            result = batch_result

                        expected_result = SKIP if result == SKIP else PASS
                        test_results[trace] = {'expected': expected_result, 'actual': result}
                        if len(artifacts) > 0:
                            test_results[trace]['artifacts'] = artifacts
                        if result == FAIL:
                            next_batch.append(trace)
                    batch = next_batch

    # These properties are recorded after iteration to ensure they only happen once.
    for _, trace_results in test_results.items():
        result = trace_results['actual']
        results['num_failures_by_type'][result] += 1
        if result == FAIL:
            trace_results['is_unexpected'] = True

    return results['num_failures_by_type'][FAIL] == 0


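# Sharding interleaves rather than chunks: with shard_count=4, shard 0 runs
# trace indices 0, 4, 8, ... and shard 1 runs indices 1, 5, 9, ...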
def _shard_tests(tests, shard_count, shard_index):
    return [tests[index] for index in range(shard_index, len(tests), shard_count)]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-perf-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
    parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument(
        '--flaky-retries', help='Number of times to retry failed tests.', type=int, default=0)
    parser.add_argument(
        '--shard-count',
        help='Number of shards for test splitting. Default is 1.',
        type=int,
        default=1)
    parser.add_argument(
        '--shard-index',
        help='Index of the current shard for test splitting. Default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '--batch-size',
        help='Number of tests to run in a group. Default: %d' % DEFAULT_BATCH_SIZE,
        type=int,
        default=DEFAULT_BATCH_SIZE)
    parser.add_argument(
        '-l', '--log', help='Log output level. Default is %s.' % DEFAULT_LOG, default=DEFAULT_LOG)

    add_skia_gold_args(parser)

    args, extra_flags = parser.parse_known_args()
    logging.basicConfig(level=args.log.upper())

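    # Copy the environment: on bots, sharding arrives via the standard
    # GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX variables, which are translated
    # into this script's shard arguments and popped so they do not also shard
    # the test binary itself.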
    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
        if 'GTEST_SHARD_INDEX' not in env:
            logging.error('Sharding params must be specified together.')
            sys.exit(1)
        args.shard_count = int(env.pop('GTEST_TOTAL_SHARDS'))
        args.shard_index = int(env.pop('GTEST_SHARD_INDEX'))

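    # Accumulate results in the Chromium JSON Test Results format (version 3),
    # which the result merger and recipes consume.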
    results = {
        'tests': {},
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }

    test_results = {}

    rc = 0

    try:
        if IsWindows():
            args.test_suite = '.\\%s.exe' % args.test_suite
        else:
            args.test_suite = './%s' % args.test_suite

        # read test set
        json_name = os.path.join(ANGLE_SRC_DIR, 'src', 'tests', 'restricted_traces',
                                 'restricted_traces.json')
        with open(json_name) as fp:
            tests = json.load(fp)

        # Split tests according to sharding
        sharded_tests = _shard_tests(tests['traces'], args.shard_count, args.shard_index)

        if args.render_test_output_dir:
            if not _run_tests(args, sharded_tests, extra_flags, env, args.render_test_output_dir,
                              results, test_results):
                rc = 1
        elif 'ISOLATED_OUTDIR' in env:
            if not _run_tests(args, sharded_tests, extra_flags, env, env['ISOLATED_OUTDIR'],
                              results, test_results):
                rc = 1
        else:
            with temporary_dir('angle_trace_') as temp_dir:
                if not _run_tests(args, sharded_tests, extra_flags, env, temp_dir, results,
                                  test_results):
                    rc = 1

    except Exception:
        traceback.print_exc()
        results['interrupted'] = True
        rc = 1

    if test_results:
        results['tests']['angle_restricted_trace_gold_tests'] = test_results

    if args.isolated_script_test_output:
        with open(args.isolated_script_test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    if args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as out_file:
            out_file.write(json.dumps({}))

    return rc


# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
    json.dump([], args.output)


if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        funcs = {
            'run': None,
            'compile_targets': main_compile_targets,
        }
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())