GitHub Repository: PojavLauncherTeam/angle
Path: blob/main_old/src/tests/capture_replay_tests.py
#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Script testing capture_replay with angle_end2end_tests
"""

# This automation script will:
# 1. Build all tests in angle_end2end_tests with frame capture enabled
# 2. Run each test with frame capture
# 3. Build CaptureReplayTest with the generated cpp trace files
# 4. Run CaptureReplayTest
# 5. Output the number of test successes and failures. A test succeeds if no error occurs during
#    its capture and replay, and the GL states at the end of the two runs match. Any unexpected
#    failure returns a non-zero exit code.

# Run this script with Python to test capture replay on angle_end2end tests
# python path/to/capture_replay_tests.py
# Command line arguments: run with --help for a full list.
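# Example invocation, using flags defined in the argument parser at the bottom of
# this file (the filter value shown is this script's default):
#   python src/tests/capture_replay_tests.py --filter '*/ES2_Vulkan_SwiftShader' \
#       --batch-count 8 --max-jobs 4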
import argparse
import difflib
import distutils.util
import fnmatch
import logging
import math
import multiprocessing
import os
import psutil
import queue
import re
import shutil
import subprocess
import sys
import time
import traceback

PIPE_STDOUT = True
DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
DEFAULT_TEST_SUITE = "angle_end2end_tests"
REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
DEFAULT_BATCH_COUNT = 8  # number of tests batched together
TRACE_FILE_SUFFIX = "_capture_context"  # because we only deal with 1 context right now
RESULT_TAG = "*RESULT"
TIME_BETWEEN_MESSAGE = 20  # in seconds
SUBPROCESS_TIMEOUT = 600  # in seconds
DEFAULT_RESULT_FILE = "results.txt"
DEFAULT_LOG_LEVEL = "info"
DEFAULT_MAX_JOBS = 8
REPLAY_BINARY = "capture_replay_tests"
if sys.platform == "win32":
    REPLAY_BINARY += ".exe"
TRACE_FOLDER = "traces"

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

switch_case_without_return_template = """\
        case {case}:
            {namespace}::{call}({params});
            break;
"""

switch_case_with_return_template = """\
        case {case}:
            return {namespace}::{call}({params});
"""

default_case_without_return_template = """\
        default:
            break;"""
default_case_with_return_template = """\
        default:
            return {default_val};"""

test_trace_info_init_template = """\
    {{
        "{namespace}",
        {namespace}::kReplayContextClientMajorVersion,
        {namespace}::kReplayContextClientMinorVersion,
        {namespace}::kReplayPlatformType,
        {namespace}::kReplayDeviceType,
        {namespace}::kReplayFrameStart,
        {namespace}::kReplayFrameEnd,
        {namespace}::kReplayDrawSurfaceWidth,
        {namespace}::kReplayDrawSurfaceHeight,
        {namespace}::kDefaultFramebufferRedBits,
        {namespace}::kDefaultFramebufferGreenBits,
        {namespace}::kDefaultFramebufferBlueBits,
        {namespace}::kDefaultFramebufferAlphaBits,
        {namespace}::kDefaultFramebufferDepthBits,
        {namespace}::kDefaultFramebufferStencilBits,
        {namespace}::kIsBinaryDataCompressed,
        {namespace}::kAreClientArraysEnabled,
        {namespace}::kbindGeneratesResources,
        {namespace}::kWebGLCompatibility,
        {namespace}::kRobustResourceInit,
    }},
"""

composite_h_file_template = """\
#pragma once
#include <vector>
#include <string>

{trace_headers}

struct TestTraceInfo {{
    std::string testName;
    uint32_t replayContextMajorVersion;
    uint32_t replayContextMinorVersion;
    EGLint replayPlatformType;
    EGLint replayDeviceType;
    uint32_t replayFrameStart;
    uint32_t replayFrameEnd;
    EGLint replayDrawSurfaceWidth;
    EGLint replayDrawSurfaceHeight;
    EGLint defaultFramebufferRedBits;
    EGLint defaultFramebufferGreenBits;
    EGLint defaultFramebufferBlueBits;
    EGLint defaultFramebufferAlphaBits;
    EGLint defaultFramebufferDepthBits;
    EGLint defaultFramebufferStencilBits;
    bool isBinaryDataCompressed;
    bool areClientArraysEnabled;
    bool bindGeneratesResources;
    bool webGLCompatibility;
    bool robustResourceInit;
}};

extern std::vector<TestTraceInfo> testTraceInfos;
"""

composite_cpp_file_template = """\
#include "{h_filename}"

std::vector<TestTraceInfo> testTraceInfos =
{{
{test_trace_info_inits}
}};
"""


def winext(name, ext):
    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name


def AutodetectGoma():
    return winext('compiler_proxy', 'exe') in (p.name() for p in psutil.process_iter())


class SubProcess():

    def __init__(self, command, logger, env=os.environ, pipe_stdout=PIPE_STDOUT):
        # shell=False so that only 1 subprocess is spawned.
        # if shell=True, a shell process is spawned, which in turn spawns the process running
        # the command. Since we do not have a handle to the 2nd process, we cannot terminate it.
        if pipe_stdout:
            self.proc_handle = subprocess.Popen(
                command, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        else:
            self.proc_handle = subprocess.Popen(command, env=env, shell=False)
        self._logger = logger

    def Join(self, timeout):
        self._logger.debug('Joining with subprocess %d, timeout %s' % (self.Pid(), str(timeout)))
        output = self.proc_handle.communicate(timeout=timeout)[0]
        if output:
            output = output.decode('utf-8')
        else:
            output = ''
        return self.proc_handle.returncode, output

    def Pid(self):
        return self.proc_handle.pid

    def Kill(self):
        self.proc_handle.terminate()
        self.proc_handle.wait()


# Class that manages all child processes of a process. Any process that spawns subprocesses
# should have one. This object is created inside the main process and inside each worker process.
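# Return-code convention used by RunSubprocess() below: (0, output) on success,
# (-1, output) for a non-zero exit or unexpected exception, and (-2, message) for
# a timeout. TestBatch.RunWithCapture() and TestBatch.RunReplay() rely on it.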
class ChildProcessesManager():

    @classmethod
    def _GetGnAndNinjaAbsolutePaths(cls):
        path = os.path.join('third_party', 'depot_tools')
        return os.path.join(path, winext('gn', 'bat')), os.path.join(path, winext('ninja', 'exe'))

    def __init__(self, logger, ninja_lock):
        # a dictionary of SubProcess objects, with pid as key
        self.subprocesses = {}
        # list of Python multiprocessing.Process handles
        self.workers = []

        self._gn_path, self._ninja_path = self._GetGnAndNinjaAbsolutePaths()
        self._use_goma = AutodetectGoma()
        self._logger = logger
        self._ninja_lock = ninja_lock

    def RunSubprocess(self, command, env=None, pipe_stdout=True, timeout=None):
        proc = SubProcess(command, self._logger, env, pipe_stdout)
        self._logger.debug('Created subprocess: %s with pid %d' % (' '.join(command), proc.Pid()))
        self.subprocesses[proc.Pid()] = proc
        try:
            returncode, output = self.subprocesses[proc.Pid()].Join(timeout)
            self.RemoveSubprocess(proc.Pid())
            if returncode != 0:
                return -1, output
            return returncode, output
        except KeyboardInterrupt:
            raise
        except subprocess.TimeoutExpired as e:
            self.RemoveSubprocess(proc.Pid())
            return -2, str(e)
        except Exception as e:
            self.RemoveSubprocess(proc.Pid())
            return -1, str(e)

    def RemoveSubprocess(self, subprocess_id):
        assert subprocess_id in self.subprocesses
        self.subprocesses[subprocess_id].Kill()
        del self.subprocesses[subprocess_id]

    def AddWorker(self, worker):
        self.workers.append(worker)

    def KillAll(self):
        for subprocess_id in self.subprocesses:
            self.subprocesses[subprocess_id].Kill()
        for worker in self.workers:
            worker.terminate()
            worker.join()
            worker.close()  # to release file descriptors immediately
        self.subprocesses = {}
        self.workers = []

    def JoinWorkers(self):
        for worker in self.workers:
            worker.join()
            worker.close()
        self.workers = []

    def IsAnyWorkerAlive(self):
        return any([worker.is_alive() for worker in self.workers])

    def GetRemainingWorkers(self):
        count = 0
        for worker in self.workers:
            if worker.is_alive():
                count += 1
        return count

    def RunGNGen(self, args, build_dir, pipe_stdout, extra_gn_args=[]):
        gn_args = [('angle_with_capture_by_default', 'true')] + extra_gn_args
        if self._use_goma:
            gn_args.append(('use_goma', 'true'))
            if args.goma_dir:
                gn_args.append(('goma_dir', '"%s"' % args.goma_dir))
        if not args.debug:
            gn_args.append(('is_debug', 'false'))
            gn_args.append(('symbol_level', '1'))
            gn_args.append(('angle_assert_always_on', 'true'))
        if args.asan:
            gn_args.append(('is_asan', 'true'))
        args_str = ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])
        self._logger.info('Calling gn gen --args="%s"' % args_str)
        cmd = [self._gn_path, 'gen', '--args=%s' % args_str, build_dir]
        return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)

    def RunNinja(self, args, build_dir, target, pipe_stdout):
        cmd = [self._ninja_path]

        # This code is taken from depot_tools/autoninja.py
        if self._use_goma:
            num_cores = multiprocessing.cpu_count()
            cmd.append('-j')
            core_multiplier = 40
            j_value = num_cores * core_multiplier

            if sys.platform.startswith('win'):
                # On Windows, a j value higher than 1000 does not improve build performance.
                j_value = min(j_value, 1000)
            elif sys.platform == 'darwin':
                # On Mac, a j value higher than 500 causes 'Too many open files' errors
                # (crbug.com/936864).
                j_value = min(j_value, 500)

            cmd.append('%d' % j_value)
        else:
            cmd.append('-l')
            cmd.append('%d' % os.cpu_count())

        cmd += ['-C', build_dir, target]
        with self._ninja_lock:
            self._logger.info('Running %s' % ' '.join(cmd))
            return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)


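# Illustrative use of ChildProcessesManager (names below match this script's API):
#   manager = ChildProcessesManager(logger, ninja_lock)
#   returncode, output = manager.RunSubprocess(cmd, timeout=SUBPROCESS_TIMEOUT)
#   returncode, output = manager.RunNinja(args, build_dir, target, pipe_stdout=True)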
def GetTestsListForFilter(args, test_path, filter, logger):
    cmd = GetRunCommand(args, test_path) + ["--list-tests", "--gtest_filter=%s" % filter]
    logger.info('Getting test list from "%s"' % " ".join(cmd))
    return subprocess.check_output(cmd, text=True)


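# The test binary prints its test names between "Tests list:" and "End tests list."
# markers; ParseTestNamesFromTestList() below extracts everything in between and
# drops tests that the expectations file marks SKIP_FOR_CAPTURE (unless
# --also-run-skipped-for-capture-tests is passed).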
def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests,
                               logger):
    output_lines = output.splitlines()
    tests = []
    seen_start_of_tests = False
    disabled = 0
    for line in output_lines:
        l = line.strip()
        if l == 'Tests list:':
            seen_start_of_tests = True
        elif l == 'End tests list.':
            break
        elif not seen_start_of_tests:
            pass
        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
            tests.append(l)
        else:
            disabled += 1

    logger.info('Found %d tests and %d disabled tests.' % (len(tests), disabled))
    return tests


def GetRunCommand(args, command):
    if args.xvfb:
        return ['vpython', 'testing/xvfb.py', command]
    else:
        return [command]


class GroupedResult():
    Passed = "Pass"
    Failed = "Fail"
    TimedOut = "Timeout"
    Crashed = "Crashed"
    CompileFailed = "CompileFailed"
    Skipped = "Skipped"

    ResultTypes = [Passed, Failed, TimedOut, Crashed, CompileFailed, Skipped]

    def __init__(self, resultcode, message, output, tests):
        self.resultcode = resultcode
        self.message = message
        self.output = output
        self.tests = []
        for test in tests:
            self.tests.append(test)


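# Aggregates the GroupedResults of one batch into per-result-type lists plus a
# printable summary; in non-verbose mode the raw output is trimmed down via
# ExtractErrors() (compile failures) or GetAbbreviatedOutput() (other failures).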
class TestBatchResult():

    display_output_lines = 20

    def __init__(self, grouped_results, verbose):
        self.results = {}
        for result_type in GroupedResult.ResultTypes:
            self.results[result_type] = []

        for grouped_result in grouped_results:
            for test in grouped_result.tests:
                self.results[grouped_result.resultcode].append(test.full_test_name)

        self.repr_str = ""
        self.GenerateRepresentationString(grouped_results, verbose)

    def __str__(self):
        return self.repr_str

    def GenerateRepresentationString(self, grouped_results, verbose):
        for grouped_result in grouped_results:
            self.repr_str += grouped_result.resultcode + ": " + grouped_result.message + "\n"
            for test in grouped_result.tests:
                self.repr_str += "\t" + test.full_test_name + "\n"
            if verbose:
                self.repr_str += grouped_result.output
            else:
                if grouped_result.resultcode == GroupedResult.CompileFailed:
                    self.repr_str += TestBatchResult.ExtractErrors(grouped_result.output)
                elif grouped_result.resultcode != GroupedResult.Passed:
                    self.repr_str += TestBatchResult.GetAbbreviatedOutput(grouped_result.output)

    @staticmethod
    def ExtractErrors(output):
        lines = output.splitlines()
        error_lines = []
        for i in range(len(lines)):
            if ": error:" in lines[i]:
                error_lines.append(lines[i] + "\n")
                if i + 1 < len(lines):
                    error_lines.append(lines[i + 1] + "\n")
        return "".join(error_lines)

    @staticmethod
    def GetAbbreviatedOutput(output):
        # Get all lines after and including the last occurrence of "[ RUN      ]".
        lines = output.splitlines()
        line_count = 0
        for line_index in reversed(range(len(lines))):
            line_count += 1
            if "[ RUN      ]" in lines[line_index]:
                break

        return '\n' + '\n'.join(lines[-line_count:]) + '\n'


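# A capture is considered replayable by CanRunReplay() below only when the trace
# folder holds, for the test's label, at least one *_frame* file, at least one
# context header (.h) and source (.cpp), and exactly one *_files.txt manifest.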
class Test():

    def __init__(self, test_name):
        self.full_test_name = test_name
        self.params = test_name.split('/')[1]
        self.context_id = 0
        self.test_index = -1  # index of test within a test batch
        self._label = self.full_test_name.replace(".", "_").replace("/", "_")

    def __str__(self):
        return self.full_test_name + " Params: " + self.params

    def GetLabel(self):
        return self._label

    def CanRunReplay(self, trace_folder_path):
        test_files = []
        label = self.GetLabel() + "_capture"
        assert (self.context_id == 0)
        for f in os.listdir(trace_folder_path):
            if os.path.isfile(os.path.join(trace_folder_path, f)) and f.startswith(label):
                test_files.append(f)
        frame_files_count = 0
        context_header_count = 0
        context_source_count = 0
        source_txt_count = 0
        context_id = 0
        for f in test_files:
            if "_frame" in f:
                frame_files_count += 1
            elif f.endswith(".txt"):
                source_txt_count += 1
            elif f.endswith(".h"):
                context_header_count += 1
                if TRACE_FILE_SUFFIX in f:
                    context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
                    context_id = int(context)
            elif f.endswith(".cpp"):
                context_source_count += 1
        can_run_replay = frame_files_count >= 1 and context_header_count >= 1 \
            and context_source_count >= 1 and source_txt_count == 1
        if not can_run_replay:
            return False
        self.context_id = context_id
        return True


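# A TestBatch is a group of up to --batch-count tests that are captured in a single
# run of the capture-enabled test suite and then replayed together from one
# composite replay binary built in the worker's Replay<N> build directory.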
class TestBatch():

    CAPTURE_FRAME_END = 100

    def __init__(self, args, logger):
        self.args = args
        self.tests = []
        self.results = []
        self.logger = logger

    def SetWorkerId(self, worker_id):
        self.trace_dir = "%s%d" % (TRACE_FOLDER, worker_id)
        self.trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, self.trace_dir)

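    # Runs the batch's tests against the capture-enabled build. The ANGLE_CAPTURE_*
    # environment variables set below drive ANGLE's frame capture: enable it, bound
    # the captured frame range, request serialized GL state for later comparison,
    # and point the trace output at this worker's trace folder.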
    def RunWithCapture(self, args, child_processes_manager):
        test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)

        extra_env = {
            'ANGLE_CAPTURE_FRAME_END': '{}'.format(self.CAPTURE_FRAME_END),
            'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
            'ANGLE_CAPTURE_ENABLED': '1',
            'ANGLE_CAPTURE_OUT_DIR': self.trace_folder_path,
        }

        env = {**os.environ.copy(), **extra_env}

        if not self.args.keep_temp_files:
            ClearFolderContent(self.trace_folder_path)
        filt = ':'.join([test.full_test_name for test in self.tests])

        cmd = GetRunCommand(args, test_exe_path)
        cmd += ['--gtest_filter=%s' % filt, '--angle-per-test-capture-label']
        self.logger.info("Run capture: '{}' with env {}".format(' '.join(cmd), str(extra_env)))

        returncode, output = child_processes_manager.RunSubprocess(
            cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if args.show_capture_stdout:
            self.logger.info("Capture stdout: %s" % output)
        if returncode == -1:
            self.results.append(GroupedResult(GroupedResult.Crashed, "", output, self.tests))
            return False
        elif returncode == -2:
            self.results.append(GroupedResult(GroupedResult.TimedOut, "", "", self.tests))
            return False
        return True

    def RemoveTestsThatDoNotProduceAppropriateTraceFiles(self):
        continued_tests = []
        skipped_tests = []
        for test in self.tests:
            if not test.CanRunReplay(self.trace_folder_path):
                skipped_tests.append(test)
            else:
                continued_tests.append(test)
        if len(skipped_tests) > 0:
            self.results.append(
                GroupedResult(
                    GroupedResult.Skipped,
                    "Skipping replay since capture didn't produce necessary trace files", "",
                    skipped_tests))
        return continued_tests

    def BuildReplay(self, replay_build_dir, composite_file_id, tests, child_processes_manager):
        # write the gni file that holds all the trace files in a list
        self.CreateGNIFile(composite_file_id, tests)
        # write header and cpp composite files, which glue the trace files with
        # CaptureReplayTests.cpp
        self.CreateTestsCompositeFiles(composite_file_id, tests)

        gn_args = [('angle_build_capture_replay_tests', 'true'),
                   ('angle_capture_replay_test_trace_dir', '"%s"' % self.trace_dir),
                   ('angle_capture_replay_composite_file_id', str(composite_file_id))]
        returncode, output = child_processes_manager.RunGNGen(self.args, replay_build_dir, True,
                                                              gn_args)
        if returncode != 0:
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at gn generation",
                              output, tests))
            return False
        returncode, output = child_processes_manager.RunNinja(self.args, replay_build_dir,
                                                              REPLAY_BINARY, True)
        if returncode != 0:
            self.logger.warning('Ninja failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at ninja", output,
                              tests))
            return False
        return True

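    # Runs the replay binary and parses its stdout: for each test it expects a line
    # "*RESULT <test_label> <code>" (see RESULT_TAG), where code 0 means the
    # serialized GL state matched between capture and replay.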
    def RunReplay(self, replay_build_dir, replay_exe_path, child_processes_manager, tests):
        extra_env = {
            'ANGLE_CAPTURE_ENABLED': '0',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'enable_capture_limits',
        }
        env = {**os.environ.copy(), **extra_env}

        self.logger.info("Run Replay: {} with env {}".format(replay_exe_path, str(extra_env)))

        returncode, output = child_processes_manager.RunSubprocess(
            GetRunCommand(self.args, replay_exe_path), env, timeout=SUBPROCESS_TIMEOUT)
        if returncode == -1:
            cmd = replay_exe_path
            self.results.append(
                GroupedResult(GroupedResult.Crashed, "Replay run crashed (%s)" % cmd, output,
                              tests))
            return
        elif returncode == -2:
            self.results.append(
                GroupedResult(GroupedResult.TimedOut, "Replay run timed out", output, tests))
            return

        output_lines = output.splitlines()
        passes = []
        fails = []
        count = 0
        for output_line in output_lines:
            words = output_line.split(" ")
            if len(words) == 3 and words[0] == RESULT_TAG:
                if int(words[2]) == 0:
                    passes.append(self.FindTestByLabel(words[1]))
                else:
                    fails.append(self.FindTestByLabel(words[1]))
                    self.logger.info("Context comparison failed: {}".format(
                        self.FindTestByLabel(words[1])))
                    self.PrintContextDiff(replay_build_dir, words[1])

                count += 1
        if len(passes) > 0:
            self.results.append(GroupedResult(GroupedResult.Passed, "", output, passes))
        if len(fails) > 0:
            self.results.append(GroupedResult(GroupedResult.Failed, "", output, fails))

    def PrintContextDiff(self, replay_build_dir, test_name):
        frame = 1
        while True:
            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
                                                                 frame)
            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
            if os.path.exists(capture_file) and os.path.exists(replay_file):
                captured_context = open(capture_file, "r").readlines()
                replayed_context = open(replay_file, "r").readlines()
                for line in difflib.unified_diff(
                        captured_context, replayed_context, fromfile=capture_file,
                        tofile=replay_file):
                    print(line, end="")
            else:
                if frame > self.CAPTURE_FRAME_END:
                    break
            frame = frame + 1

    def FindTestByLabel(self, label):
        for test in self.tests:
            if test.GetLabel() == label:
                return test
        return None

    def AddTest(self, test):
        assert len(self.tests) <= self.args.batch_count
        test.test_index = len(self.tests)
        self.tests.append(test)

    # gni file, which holds all the sources for a replay application
    def CreateGNIFile(self, composite_file_id, tests):
        test_list = []
        for test in tests:
            label = test.GetLabel()
            assert (test.context_id > 0)

            fname = "%s%s%d_files.txt" % (label, TRACE_FILE_SUFFIX, test.context_id)
            fpath = os.path.join(self.trace_folder_path, fname)
            with open(fpath) as f:
                files = f.readlines()
            files = ['"%s/%s"' % (self.trace_dir, file.strip()) for file in files]
            angledata = "%s%s.angledata.gz" % (label, TRACE_FILE_SUFFIX)
            test_list += [
                '["%s", %s, [%s], "%s"]' % (label, test.context_id, ','.join(files), angledata)
            ]
        gni_path = os.path.join(self.trace_folder_path, "traces%d.gni" % composite_file_id)
        with open(gni_path, "w") as f:
            f.write("trace_data = [\n%s\n]\n" % ',\n'.join(test_list))

    # header and cpp composite files, which glue the trace files with CaptureReplayTests.cpp
    def CreateTestsCompositeFiles(self, composite_file_id, tests):
        # write CompositeTests header file
        include_header_template = '#include "{header_file_path}.h"\n'
        trace_headers = "".join([
            include_header_template.format(header_file_path=test.GetLabel() + TRACE_FILE_SUFFIX +
                                           str(test.context_id)) for test in tests
        ])

        h_filename = "CompositeTests%d.h" % composite_file_id
        with open(os.path.join(self.trace_folder_path, h_filename), "w") as h_file:
            h_file.write(composite_h_file_template.format(trace_headers=trace_headers))

        # write CompositeTests cpp file
        test_trace_info_inits = "".join([
            test_trace_info_init_template.format(namespace=tests[i].GetLabel())
            for i in range(len(tests))
        ])

        cpp_filename = "CompositeTests%d.cpp" % composite_file_id
        with open(os.path.join(self.trace_folder_path, cpp_filename), "w") as cpp_file:
            cpp_file.write(
                composite_cpp_file_template.format(
                    h_filename=h_filename, test_trace_info_inits=test_trace_info_inits))

    def __str__(self):
        repr_str = "TestBatch:\n"
        for test in self.tests:
            repr_str += ("\t" + str(test) + "\n")
        return repr_str

    def __getitem__(self, index):
        assert index < len(self.tests)
        return self.tests[index]

    def __iter__(self):
        return iter(self.tests)

    def GetResults(self):
        return TestBatchResult(self.results, self.args.verbose)


class TestExpectation():
    # tests that must not be run for capture (stored as a list of patterns)
    skipped_for_capture_tests = []

    # test expectations for tests that do not pass
    non_pass_results = {}

    flaky_tests = []

    non_pass_re = {}

    # yapf: disable
    # we want each pair on one line
    result_map = { "FAIL" : GroupedResult.Failed,
                   "TIMEOUT" : GroupedResult.TimedOut,
                   "CRASHED" : GroupedResult.Crashed,
                   "COMPILE_FAILED" : GroupedResult.CompileFailed,
                   "SKIPPED_BY_GTEST" : GroupedResult.Skipped,
                   "PASS" : GroupedResult.Passed}
    # yapf: enable

    def __init__(self, args):
        expected_results_filename = "capture_replay_expectations.txt"
        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
        self._asan = args.asan
        with open(expected_results_path, "rt") as f:
            for line in f:
                l = line.strip()
                if l != "" and not l.startswith("#"):
                    self.ReadOneExpectation(l, args.debug)

    def _CheckTagsWithConfig(self, tags, config_tags):
        for tag in tags:
            if tag not in config_tags:
                return False
        return True

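    # An expectation line has the shape "<id> [TAG ...] : <test pattern> = <RESULT>",
    # for example (hypothetical): "1234 WIN DEBUG : FooTest.*/ES2_Vulkan_SwiftShader = FLAKY".
    # Tags are matched against the current platform/ASAN/DEBUG configuration, and '*'
    # in the pattern acts as a wildcard.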
    def ReadOneExpectation(self, line, is_debug):
        (testpattern, result) = line.split('=')
        (test_info_string, test_name_string) = testpattern.split(':')
        test_name = test_name_string.strip()
        test_info = test_info_string.strip().split()
        result_stripped = result.strip()

        tags = []
        if len(test_info) > 1:
            tags = test_info[1:]

        config_tags = [GetPlatformForSkip()]
        if self._asan:
            config_tags += ['ASAN']
        if is_debug:
            config_tags += ['DEBUG']

        if self._CheckTagsWithConfig(tags, config_tags):
            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
            if result_stripped == 'SKIP_FOR_CAPTURE':
                self.skipped_for_capture_tests.append(test_name_regex)
            elif result_stripped == 'FLAKY':
                self.flaky_tests.append(test_name_regex)
            else:
                self.non_pass_results[test_name] = self.result_map[result_stripped]
                self.non_pass_re[test_name] = test_name_regex

    def TestIsSkippedForCapture(self, test_name):
        for p in self.skipped_for_capture_tests:
            m = p.match(test_name)
            if m is not None:
                return True
        return False

    def Filter(self, test_list, run_all_tests):
        result = {}
        for t in test_list:
            for key in self.non_pass_results.keys():
                if self.non_pass_re[key].match(t) is not None:
                    result[t] = self.non_pass_results[key]
            if run_all_tests:
                for skip in self.skipped_for_capture_tests:
                    if skip.match(t) is not None:
                        result[t] = "'forced skip'"
        return result

    def IsFlaky(self, test_name):
        for flaky in self.flaky_tests:
            if flaky.match(test_name) is not None:
                return True
        return False


def ClearFolderContent(path):
    for f in os.listdir(path):
        if os.path.isfile(os.path.join(path, f)):
            os.remove(os.path.join(path, f))

def SetCWDToAngleFolder():
    cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
    os.chdir(cwd)
    return cwd


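# Worker entry point: each worker process repeatedly pulls a TestBatch from the
# shared job queue and drives it through capture, trace-file validation, replay
# build, and replay run, pushing progress messages and TestBatchResults back to
# the main process.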
def RunTests(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock):
    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % worker_id)
    replay_exec_path = os.path.join(replay_build_dir, REPLAY_BINARY)

    child_processes_manager = ChildProcessesManager(logger, ninja_lock)
    # used to differentiate between multiple composite files when there are multiple test batches
    # running on the same worker and --keep-temp-files is set
    composite_file_id = 1
    while not job_queue.empty():
        try:
            test_batch = job_queue.get()
            message_queue.put("Starting {} tests on worker {}. Unstarted jobs: {}".format(
                len(test_batch.tests), worker_id, job_queue.qsize()))

            test_batch.SetWorkerId(worker_id)

            success = test_batch.RunWithCapture(args, child_processes_manager)
            if not success:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            continued_tests = test_batch.RemoveTestsThatDoNotProduceAppropriateTraceFiles()
            if len(continued_tests) == 0:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            success = test_batch.BuildReplay(replay_build_dir, composite_file_id, continued_tests,
                                             child_processes_manager)
            if args.keep_temp_files:
                composite_file_id += 1
            if not success:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            test_batch.RunReplay(replay_build_dir, replay_exec_path, child_processes_manager,
                                 continued_tests)
            result_list.append(test_batch.GetResults())
            message_queue.put(str(test_batch.GetResults()))
        except KeyboardInterrupt:
            child_processes_manager.KillAll()
            raise
        except queue.Empty:
            child_processes_manager.KillAll()
            break
        except Exception as e:
            message_queue.put("RunTestsException: %s\n%s" % (repr(e), traceback.format_exc()))
            child_processes_manager.KillAll()
    child_processes_manager.KillAll()


def SafeDeleteFolder(folder_name):
    while os.path.isdir(folder_name):
        try:
            shutil.rmtree(folder_name)
        except KeyboardInterrupt:
            raise
        except PermissionError:
            pass


def DeleteReplayBuildFolders(folder_num, replay_build_dir, trace_folder):
    for i in range(folder_num):
        folder_name = replay_build_dir + str(i)
        if os.path.isdir(folder_name):
            SafeDeleteFolder(folder_name)


def CreateTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        os.makedirs(folder_path)


def DeleteTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            SafeDeleteFolder(folder_path)


def GetPlatformForSkip():
    # yapf: disable
    # we want each pair on one line
    platform_map = { "win32" : "WIN",
                     "linux" : "LINUX" }
    # yapf: enable
    return platform_map.get(sys.platform, "UNKNOWN")


def main(args):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level=args.log.upper())

    ninja_lock = multiprocessing.Lock()
    child_processes_manager = ChildProcessesManager(logger, ninja_lock)
    try:
        start_time = time.time()
        # Set the number of workers to cpu_count - 1 (the main process already takes up a
        # CPU core). Whenever a worker is available, it grabs the next job from the job queue
        # and runs it. A worker shuts down when there are no more jobs.
        worker_count = min(multiprocessing.cpu_count() - 1, args.max_jobs)
        cwd = SetCWDToAngleFolder()

        CreateTraceFolders(worker_count)
        capture_build_dir = os.path.normpath(r"%s/Capture" % args.out_dir)
        returncode, output = child_processes_manager.RunGNGen(args, capture_build_dir, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # run ninja to build all tests
        returncode, output = child_processes_manager.RunNinja(args, capture_build_dir,
                                                              args.test_suite, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # get a list of tests
        test_path = os.path.join(capture_build_dir, args.test_suite)
        test_list = GetTestsListForFilter(args, test_path, args.filter, logger)
        test_expectation = TestExpectation(args)
        test_names = ParseTestNamesFromTestList(test_list, test_expectation,
                                                args.also_run_skipped_for_capture_tests, logger)
        test_expectation_for_list = test_expectation.Filter(
            test_names, args.also_run_skipped_for_capture_tests)
        # Objects created by the manager can be shared by multiple processes. We use it to
        # create collections that are shared by multiple processes, such as the job queue and
        # the result list.
        manager = multiprocessing.Manager()
        job_queue = manager.Queue()
        test_batch_num = int(math.ceil(len(test_names) / float(args.batch_count)))

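        # Tests are distributed across batches with a stride: batch i receives tests
        # i, i + test_batch_num, i + 2 * test_batch_num, and so on (see the loop below).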
        # put the test batches into the job queue
        for batch_index in range(test_batch_num):
            batch = TestBatch(args, logger)
            test_index = batch_index
            while test_index < len(test_names):
                batch.AddTest(Test(test_names[test_index]))
                test_index += test_batch_num
            job_queue.put(batch)

        passed_count = 0
        failed_count = 0
        timedout_count = 0
        crashed_count = 0
        compile_failed_count = 0
        skipped_count = 0

        unexpected_count = {}
        unexpected_test_results = {}

        for type in GroupedResult.ResultTypes:
            unexpected_count[type] = 0
            unexpected_test_results[type] = []

        # The result list is created by the manager so it can be shared across processes. Each
        # worker appends a TestBatchResult for every batch it runs; after all workers finish,
        # the main process tallies the per-result-type test name lists stored in each
        # TestBatchResult.
        result_list = manager.list()
        message_queue = manager.Queue()
        # so that we do not spawn more processes than we actually need
        worker_count = min(worker_count, test_batch_num)
        # spawning and starting up workers
        for worker_id in range(worker_count):
            proc = multiprocessing.Process(
                target=RunTests,
                args=(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock))
            child_processes_manager.AddWorker(proc)
            proc.start()

        # Print messages from the queue populated by the workers. If there is no message and
        # the time since the last message exceeds TIME_BETWEEN_MESSAGE, print a heartbeat to
        # signal that tests are still running.
        last_message_timestamp = 0
        while child_processes_manager.IsAnyWorkerAlive():
            while not message_queue.empty():
                msg = message_queue.get()
                logger.info(msg)
                last_message_timestamp = time.time()
            cur_time = time.time()
            if cur_time - last_message_timestamp > TIME_BETWEEN_MESSAGE:
                last_message_timestamp = cur_time
                logger.info("Tests are still running. Remaining workers: " + \
                    str(child_processes_manager.GetRemainingWorkers()) + \
                    ". Unstarted jobs: " + str(job_queue.qsize()))
            time.sleep(1.0)
        child_processes_manager.JoinWorkers()
        while not message_queue.empty():
            msg = message_queue.get()
            logger.warning(msg)
        end_time = time.time()

        # print out results
        logger.info("\n\n\n")
        logger.info("Results:")

        flaky_results = []

        for test_batch in result_list:
            test_batch_result = test_batch.results
            logger.debug(str(test_batch_result))

            passed_count += len(test_batch_result[GroupedResult.Passed])
            failed_count += len(test_batch_result[GroupedResult.Failed])
            timedout_count += len(test_batch_result[GroupedResult.TimedOut])
            crashed_count += len(test_batch_result[GroupedResult.Crashed])
            compile_failed_count += len(test_batch_result[GroupedResult.CompileFailed])
            skipped_count += len(test_batch_result[GroupedResult.Skipped])

            for real_result, test_list in test_batch_result.items():
                for test in test_list:
                    if test_expectation.IsFlaky(test):
                        flaky_results.append("{} ({})".format(test, real_result))
                        continue

                    # Passing tests are not in the list
                    if test not in test_expectation_for_list.keys():
                        if real_result != GroupedResult.Passed:
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                "{} {} (expected Pass or is new test)".format(test, real_result))
                    else:
                        expected_result = test_expectation_for_list[test]
                        if real_result != expected_result:
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                "{} {} (expected {})".format(test, real_result, expected_result))

        logger.info("")
        logger.info("Elapsed time: %.2lf seconds" % (end_time - start_time))
        logger.info("")

        if len(flaky_results):
            logger.info("Flaky test(s):")
            for line in flaky_results:
                logger.info(" {}".format(line))
            logger.info("")

        logger.info(
            "Summary: Passed: %d, Comparison Failed: %d, Crashed: %d, CompileFailed %d, Skipped: %d, Timeout: %d"
            % (passed_count, failed_count, crashed_count, compile_failed_count, skipped_count,
               timedout_count))

        retval = EXIT_SUCCESS

        unexpected_test_results_count = 0
        for count in unexpected_count.values():
            unexpected_test_results_count += count

        if unexpected_test_results_count > 0:
            retval = EXIT_FAILURE
            logger.info("")
            logger.info("Failure: Obtained {} results that differ from expectation:".format(
                unexpected_test_results_count))
            logger.info("")
            for result, count in unexpected_count.items():
                if count > 0:
                    logger.info("Unexpected '{}' ({}):".format(result, count))
                    for test_result in unexpected_test_results[result]:
                        logger.info(" {}".format(test_result))
                    logger.info("")

        logger.info("\n\n")

        # delete the generated folders if the --purge flag is set
        if args.purge:
            DeleteTraceFolders(worker_count)
            if os.path.isdir(args.out_dir):
                SafeDeleteFolder(args.out_dir)

        # Try hard to ensure output is finished before ending the test.
        logging.shutdown()
        sys.stdout.flush()
        time.sleep(2.0)
        return retval

    except KeyboardInterrupt:
        child_processes_manager.KillAll()
        return EXIT_FAILURE


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out-dir',
        default=DEFAULT_OUT_DIR,
        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
        % DEFAULT_OUT_DIR)
    # TODO(jmadill): Remove this argument. http://anglebug.com/6102
    parser.add_argument(
        '--use-goma',
        action='store_true',
        help='Use goma for distributed builds. Requires internal access. Off by default.')
    parser.add_argument(
        '-f',
        '--filter',
        '--gtest_filter',
        default=DEFAULT_FILTER,
        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
    parser.add_argument(
        '--test-suite',
        default=DEFAULT_TEST_SUITE,
        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--batch-count',
        default=DEFAULT_BATCH_COUNT,
        type=int,
        help='Number of tests in a batch. Default is %d.' % DEFAULT_BATCH_COUNT)
    parser.add_argument(
        '--keep-temp-files',
        action='store_true',
        help='Whether to keep the temp files and folders. Off by default.')
    parser.add_argument(
        '--purge', action='store_true', help='Purge all build directories on exit.')
    parser.add_argument(
        '--goma-dir',
        default='',
        help='Set custom goma directory. Uses the goma in the path by default.')
    parser.add_argument(
        '--output-to-file',
        action='store_true',
        help='Whether to write output to a result file. Off by default.')
    parser.add_argument(
        '--result-file',
        default=DEFAULT_RESULT_FILE,
        help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %
        DEFAULT_RESULT_FILE)
    parser.add_argument('-v', '--verbose', action='store_true', help='Shows full test output.')
    parser.add_argument(
        '-l',
        '--log',
        default=DEFAULT_LOG_LEVEL,
        help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '-j',
        '--max-jobs',
        default=DEFAULT_MAX_JOBS,
        type=int,
        help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)
    parser.add_argument(
        '-a',
        '--also-run-skipped-for-capture-tests',
        action='store_true',
        help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE.')

    # TODO(jmadill): Remove this argument. http://anglebug.com/6102
    parser.add_argument('--depot-tools-path', default=None, help='Path to depot tools.')
    parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')
    parser.add_argument('--asan', action='store_true', help='Build with ASAN.')
    parser.add_argument(
        '--show-capture-stdout', action='store_true', help='Print test stdout during capture.')
    parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')
    args = parser.parse_args()
    if args.debug and (args.out_dir == DEFAULT_OUT_DIR):
        args.out_dir = args.out_dir + "Debug"

    if sys.platform == "win32":
        args.test_suite += ".exe"
    if args.output_to_file:
        logging.basicConfig(level=args.log.upper(), filename=args.result_file)
    else:
        logging.basicConfig(level=args.log.upper())

    sys.exit(main(args))