# Path: blob/main_old/src/tests/capture_replay_tests.py
# (1693 views — scrape metadata, kept as a comment so the file stays valid Python)
#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Script testing capture_replay with angle_end2end_tests
"""

# Automation script will:
# 1. Build all tests in angle_end2end with frame capture enabled
# 2. Run each test with frame capture
# 3. Build CaptureReplayTest with cpp trace files
# 4. Run CaptureReplayTest
# 5. Output the number of test successes and failures. A test succeeds if no error occurs during
#    its capture and replay, and the GL states at the end of two runs match. Any unexpected failure
#    will return non-zero exit code

# Run this script with Python to test capture replay on angle_end2end tests
# python path/to/capture_replay_tests.py
# Command line arguments: run with --help for a full list.

import argparse
import difflib
import distutils.util
import fnmatch
import logging
import math
import multiprocessing
import os
import psutil
import queue
import re
import shutil
import subprocess
import sys
import time
import traceback

PIPE_STDOUT = True
DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
DEFAULT_TEST_SUITE = "angle_end2end_tests"
REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
DEFAULT_BATCH_COUNT = 8  # number of tests batched together
TRACE_FILE_SUFFIX = "_capture_context"  # because we only deal with 1 context right now
RESULT_TAG = "*RESULT"
TIME_BETWEEN_MESSAGE = 20  # in seconds
SUBPROCESS_TIMEOUT = 600  # in seconds
DEFAULT_RESULT_FILE = "results.txt"
DEFAULT_LOG_LEVEL = "info"
DEFAULT_MAX_JOBS = 8
REPLAY_BINARY = "capture_replay_tests"
if sys.platform == "win32":
    REPLAY_BINARY += ".exe"
TRACE_FOLDER = "traces"

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

# C++ code-generation templates used when gluing captured trace files into the
# replay harness (CaptureReplayTests.cpp). The doubled braces escape
# str.format placeholders.
switch_case_without_return_template = """\
        case {case}:
            {namespace}::{call}({params});
            break;
"""

switch_case_with_return_template = """\
        case {case}:
            return {namespace}::{call}({params});
"""

default_case_without_return_template = """\
        default:
            break;"""
default_case_with_return_template = """\
        default:
            return {default_val};"""

test_trace_info_init_template = """\
    {{
        "{namespace}",
        {namespace}::kReplayContextClientMajorVersion,
        {namespace}::kReplayContextClientMinorVersion,
        {namespace}::kReplayPlatformType,
        {namespace}::kReplayDeviceType,
        {namespace}::kReplayFrameStart,
        {namespace}::kReplayFrameEnd,
        {namespace}::kReplayDrawSurfaceWidth,
        {namespace}::kReplayDrawSurfaceHeight,
        {namespace}::kDefaultFramebufferRedBits,
        {namespace}::kDefaultFramebufferGreenBits,
        {namespace}::kDefaultFramebufferBlueBits,
        {namespace}::kDefaultFramebufferAlphaBits,
        {namespace}::kDefaultFramebufferDepthBits,
        {namespace}::kDefaultFramebufferStencilBits,
        {namespace}::kIsBinaryDataCompressed,
        {namespace}::kAreClientArraysEnabled,
        {namespace}::kbindGeneratesResources,
        {namespace}::kWebGLCompatibility,
        {namespace}::kRobustResourceInit,
    }},
"""

composite_h_file_template = """\
#pragma once
#include <vector>
#include <string>

{trace_headers}

struct TestTraceInfo {{
    std::string testName;
    uint32_t replayContextMajorVersion;
    uint32_t replayContextMinorVersion;
    EGLint replayPlatformType;
    EGLint replayDeviceType;
    uint32_t replayFrameStart;
    uint32_t replayFrameEnd;
    EGLint replayDrawSurfaceWidth;
    EGLint replayDrawSurfaceHeight;
    EGLint defaultFramebufferRedBits;
    EGLint defaultFramebufferGreenBits;
    EGLint defaultFramebufferBlueBits;
    EGLint defaultFramebufferAlphaBits;
    EGLint defaultFramebufferDepthBits;
    EGLint defaultFramebufferStencilBits;
    bool isBinaryDataCompressed;
    bool areClientArraysEnabled;
    bool bindGeneratesResources;
    bool webGLCompatibility;
    bool robustResourceInit;
}};

extern std::vector<TestTraceInfo> testTraceInfos;
"""

composite_cpp_file_template = """\
#include "{h_filename}"

std::vector<TestTraceInfo> testTraceInfos =
{{
{test_trace_info_inits}
}};
"""


def winext(name, ext):
    """Append a Windows extension to *name* on win32; return *name* unchanged elsewhere."""
    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name


def AutodetectGoma():
    """Return True if a goma compiler_proxy process is running on this machine."""
    return winext('compiler_proxy', 'exe') in (p.name() for p in psutil.process_iter())


class SubProcess():
    """Thin wrapper around subprocess.Popen that can be joined with a timeout or killed."""

    def __init__(self, command, logger, env=os.environ, pipe_stdout=PIPE_STDOUT):
        # shell=False so that only 1 subprocess is spawned.
        # if shell=True, a shell process is spawned, which in turn spawns the process running
        # the command. Since we do not have a handle to the 2nd process, we cannot terminate it.
        if pipe_stdout:
            self.proc_handle = subprocess.Popen(
                command, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        else:
            self.proc_handle = subprocess.Popen(command, env=env, shell=False)
        self._logger = logger

    def Join(self, timeout):
        """Wait for the process (up to *timeout* seconds); return (returncode, decoded stdout)."""
        self._logger.debug('Joining with subprocess %d, timeout %s' % (self.Pid(), str(timeout)))
        output = self.proc_handle.communicate(timeout=timeout)[0]
        if output:
            output = output.decode('utf-8')
        else:
            output = ''
        return self.proc_handle.returncode, output

    def Pid(self):
        return self.proc_handle.pid

    def Kill(self):
        self.proc_handle.terminate()
        self.proc_handle.wait()
# class that manages all child processes of a process. Any process thats spawns subprocesses
# should have this. This object is created inside the main process, and each worker process.
class ChildProcessesManager():

    @classmethod
    def _GetGnAndNinjaAbsolutePaths(self):
        path = os.path.join('third_party', 'depot_tools')
        return os.path.join(path, winext('gn', 'bat')), os.path.join(path, winext('ninja', 'exe'))

    def __init__(self, logger, ninja_lock):
        # a dictionary of Subprocess, with pid as key
        self.subprocesses = {}
        # list of Python multiprocess.Process handles
        self.workers = []

        self._gn_path, self._ninja_path = self._GetGnAndNinjaAbsolutePaths()
        self._use_goma = AutodetectGoma()
        self._logger = logger
        self._ninja_lock = ninja_lock

    def RunSubprocess(self, command, env=None, pipe_stdout=True, timeout=None):
        """Run *command* to completion. Returns (0, output) on success,
        (-1, output) on failure/crash, (-2, message) on timeout."""
        proc = SubProcess(command, self._logger, env, pipe_stdout)
        self._logger.debug('Created subprocess: %s with pid %d' % (' '.join(command), proc.Pid()))
        self.subprocesses[proc.Pid()] = proc
        try:
            returncode, output = self.subprocesses[proc.Pid()].Join(timeout)
            self.RemoveSubprocess(proc.Pid())
            if returncode != 0:
                return -1, output
            return returncode, output
        except KeyboardInterrupt:
            raise
        except subprocess.TimeoutExpired as e:
            self.RemoveSubprocess(proc.Pid())
            return -2, str(e)
        except Exception as e:
            self.RemoveSubprocess(proc.Pid())
            return -1, str(e)

    def RemoveSubprocess(self, subprocess_id):
        assert subprocess_id in self.subprocesses
        self.subprocesses[subprocess_id].Kill()
        del self.subprocesses[subprocess_id]

    def AddWorker(self, worker):
        self.workers.append(worker)

    def KillAll(self):
        for subprocess_id in self.subprocesses:
            self.subprocesses[subprocess_id].Kill()
        for worker in self.workers:
            worker.terminate()
            worker.join()
            worker.close()  # to release file descriptors immediately
        self.subprocesses = {}
        self.workers = []

    def JoinWorkers(self):
        for worker in self.workers:
            worker.join()
            worker.close()
        self.workers = []

    def IsAnyWorkerAlive(self):
        return any([worker.is_alive() for worker in self.workers])

    def GetRemainingWorkers(self):
        count = 0
        for worker in self.workers:
            if worker.is_alive():
                count += 1
        return count

    def RunGNGen(self, args, build_dir, pipe_stdout, extra_gn_args=[]):
        # NOTE(review): mutable default `extra_gn_args=[]` is never mutated here, so it is
        # harmless, but a None sentinel would be safer if this ever changes.
        gn_args = [('angle_with_capture_by_default', 'true')] + extra_gn_args
        if self._use_goma:
            gn_args.append(('use_goma', 'true'))
            if args.goma_dir:
                gn_args.append(('goma_dir', '"%s"' % args.goma_dir))
        if not args.debug:
            gn_args.append(('is_debug', 'false'))
            gn_args.append(('symbol_level', '1'))
            gn_args.append(('angle_assert_always_on', 'true'))
        if args.asan:
            gn_args.append(('is_asan', 'true'))
        args_str = ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])
        self._logger.info('Calling gn gen --args="%s"' % args_str)
        cmd = [self._gn_path, 'gen', '--args=%s' % args_str, build_dir]
        return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)

    def RunNinja(self, args, build_dir, target, pipe_stdout):
        cmd = [self._ninja_path]

        # This code is taken from depot_tools/autoninja.py
        if self._use_goma:
            num_cores = multiprocessing.cpu_count()
            cmd.append('-j')
            core_multiplier = 40
            j_value = num_cores * core_multiplier

            if sys.platform.startswith('win'):
                # On windows, j value higher than 1000 does not improve build performance.
                j_value = min(j_value, 1000)
            elif sys.platform == 'darwin':
                # On Mac, j value higher than 500 causes 'Too many open files' error
                # (crbug.com/936864).
                j_value = min(j_value, 500)

            cmd.append('%d' % j_value)
        else:
            cmd.append('-l')
            cmd.append('%d' % os.cpu_count())

        cmd += ['-C', build_dir, target]
        with self._ninja_lock:
            self._logger.info('Running %s' % ' '.join(cmd))
            return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)


def GetTestsListForFilter(args, test_path, filter, logger):
    """Ask the test binary for the list of tests matching the gtest filter."""
    cmd = GetRunCommand(args, test_path) + ["--list-tests", "--gtest_filter=%s" % filter]
    logger.info('Getting test list from "%s"' % " ".join(cmd))
    return subprocess.check_output(cmd, text=True)


def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests,
                               logger):
    """Parse the 'Tests list:' block printed by the test binary into test names,
    dropping tests the expectation file marks SKIP_FOR_CAPTURE (unless overridden)."""
    output_lines = output.splitlines()
    tests = []
    seen_start_of_tests = False
    disabled = 0
    for line in output_lines:
        l = line.strip()
        if l == 'Tests list:':
            seen_start_of_tests = True
        elif l == 'End tests list.':
            break
        elif not seen_start_of_tests:
            pass
        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
            tests.append(l)
        else:
            disabled += 1

    logger.info('Found %s tests and %d disabled tests.' % (len(tests), disabled))
    return tests


def GetRunCommand(args, command):
    if args.xvfb:
        return ['vpython', 'testing/xvfb.py', command]
    else:
        return [command]


class GroupedResult():
    """A result code plus the tests it applies to, with the raw run output."""

    Passed = "Pass"
    Failed = "Fail"
    TimedOut = "Timeout"
    Crashed = "Crashed"
    CompileFailed = "CompileFailed"
    Skipped = "Skipped"

    ResultTypes = [Passed, Failed, TimedOut, Crashed, CompileFailed, Skipped]

    def __init__(self, resultcode, message, output, tests):
        self.resultcode = resultcode
        self.message = message
        self.output = output
        self.tests = []
        for test in tests:
            self.tests.append(test)


class TestBatchResult():

    display_output_lines = 20

    def __init__(self, grouped_results, verbose):
        self.results = {}
        for result_type in GroupedResult.ResultTypes:
            self.results[result_type] = []

        for grouped_result in grouped_results:
            for test in grouped_result.tests:
                self.results[grouped_result.resultcode].append(test.full_test_name)

        self.repr_str = ""
        self.GenerateRepresentationString(grouped_results, verbose)

    def __str__(self):
        return self.repr_str

    def GenerateRepresentationString(self, grouped_results, verbose):
        for grouped_result in grouped_results:
            self.repr_str += grouped_result.resultcode + ": " + grouped_result.message + "\n"
            for test in grouped_result.tests:
                self.repr_str += "\t" + test.full_test_name + "\n"
            if verbose:
                self.repr_str += grouped_result.output
            else:
                if grouped_result.resultcode == GroupedResult.CompileFailed:
                    self.repr_str += TestBatchResult.ExtractErrors(grouped_result.output)
                elif grouped_result.resultcode != GroupedResult.Passed:
                    self.repr_str += TestBatchResult.GetAbbreviatedOutput(grouped_result.output)

    # NOTE(review): called as TestBatchResult.ExtractErrors(...), i.e. effectively a
    # static method; a @staticmethod decorator would make the intent explicit.
    def ExtractErrors(output):
        lines = output.splitlines()
        error_lines = []
        for i in range(len(lines)):
            if ": error:" in lines[i]:
                error_lines.append(lines[i] + "\n")
                if i + 1 < len(lines):
                    error_lines.append(lines[i + 1] + "\n")
        return "".join(error_lines)

    def GetAbbreviatedOutput(output):
        # Get all lines after and including the last occurance of "Run".
        lines = output.splitlines()
        line_count = 0
        for line_index in reversed(range(len(lines))):
            line_count += 1
            if "[ RUN      ]" in lines[line_index]:
                break

        return '\n' + '\n'.join(lines[-line_count:]) + '\n'


class Test():

    def __init__(self, test_name):
        self.full_test_name = test_name
        self.params = test_name.split('/')[1]
        self.context_id = 0
        self.test_index = -1  # index of test within a test batch
        self._label = self.full_test_name.replace(".", "_").replace("/", "_")

    def __str__(self):
        return self.full_test_name + " Params: " + self.params

    def GetLabel(self):
        return self._label

    def CanRunReplay(self, trace_folder_path):
        """Check that capture produced the trace files needed for replay and, if so,
        record the captured context id on this test. Returns True when replayable."""
        test_files = []
        label = self.GetLabel() + "_capture"
        assert (self.context_id == 0)
        for f in os.listdir(trace_folder_path):
            if os.path.isfile(os.path.join(trace_folder_path, f)) and f.startswith(label):
                test_files.append(f)
        frame_files_count = 0
        context_header_count = 0
        context_source_count = 0
        source_txt_count = 0
        context_id = 0
        for f in test_files:
            if "_frame" in f:
                frame_files_count += 1
            elif f.endswith(".txt"):
                source_txt_count += 1
            elif f.endswith(".h"):
                context_header_count += 1
                if TRACE_FILE_SUFFIX in f:
                    # e.g. "<label>_capture_context1.h" -> "1"
                    context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
                    context_id = int(context)
            elif f.endswith(".cpp"):
                context_source_count += 1
        can_run_replay = frame_files_count >= 1 and context_header_count >= 1 \
            and context_source_count >= 1 and source_txt_count == 1
        if not can_run_replay:
            return False
        self.context_id = context_id
        return True


class TestBatch():
    """A group of tests captured and replayed together by one worker."""

    CAPTURE_FRAME_END = 100

    def __init__(self, args, logger):
        self.args = args
        self.tests = []
        self.results = []
        self.logger = logger

    def SetWorkerId(self, worker_id):
        self.trace_dir = "%s%d" % (TRACE_FOLDER, worker_id)
        self.trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, self.trace_dir)

    def RunWithCapture(self, args, child_processes_manager):
        test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)

        extra_env = {
            'ANGLE_CAPTURE_FRAME_END': '{}'.format(self.CAPTURE_FRAME_END),
            'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
            'ANGLE_CAPTURE_ENABLED': '1',
            'ANGLE_CAPTURE_OUT_DIR': self.trace_folder_path,
        }

        env = {**os.environ.copy(), **extra_env}

        if not self.args.keep_temp_files:
            ClearFolderContent(self.trace_folder_path)
        filt = ':'.join([test.full_test_name for test in self.tests])

        cmd = GetRunCommand(args, test_exe_path)
        cmd += ['--gtest_filter=%s' % filt, '--angle-per-test-capture-label']
        self.logger.info("Run capture: '{}' with env {}".format(' '.join(cmd), str(extra_env)))

        returncode, output = child_processes_manager.RunSubprocess(
            cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if args.show_capture_stdout:
            self.logger.info("Capture stdout: %s" % output)
        if returncode == -1:
            self.results.append(GroupedResult(GroupedResult.Crashed, "", output, self.tests))
            return False
        elif returncode == -2:
            self.results.append(GroupedResult(GroupedResult.TimedOut, "", "", self.tests))
            return False
        return True

    def RemoveTestsThatDoNotProduceAppropriateTraceFiles(self):
        continued_tests = []
        skipped_tests = []
        for test in self.tests:
            if not test.CanRunReplay(self.trace_folder_path):
                skipped_tests.append(test)
            else:
                continued_tests.append(test)
        if len(skipped_tests) > 0:
            self.results.append(
                GroupedResult(
                    GroupedResult.Skipped,
                    "Skipping replay since capture didn't produce necessary trace files", "",
                    skipped_tests))
        return continued_tests

    def BuildReplay(self, replay_build_dir, composite_file_id, tests, child_processes_manager):
        # write gni file that holds all the traces files in a list
        self.CreateGNIFile(composite_file_id, tests)
        # write header and cpp composite files, which glue the trace files with
        # CaptureReplayTests.cpp
        self.CreateTestsCompositeFiles(composite_file_id, tests)

        gn_args = [('angle_build_capture_replay_tests', 'true'),
                   ('angle_capture_replay_test_trace_dir', '"%s"' % self.trace_dir),
                   ('angle_capture_replay_composite_file_id', str(composite_file_id))]
        returncode, output = child_processes_manager.RunGNGen(self.args, replay_build_dir, True,
                                                              gn_args)
        if returncode != 0:
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at gn generation",
                              output, tests))
            return False
        returncode, output = child_processes_manager.RunNinja(self.args, replay_build_dir,
                                                              REPLAY_BINARY, True)
        if returncode != 0:
            self.logger.warning('Ninja failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at ninja", output,
                              tests))
            return False
        return True

    def RunReplay(self, replay_build_dir, replay_exe_path, child_processes_manager, tests):
        extra_env = {
            'ANGLE_CAPTURE_ENABLED': '0',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'enable_capture_limits',
        }
        env = {**os.environ.copy(), **extra_env}

        self.logger.info("Run Replay: {} with env {}".format(replay_exe_path, str(extra_env)))

        returncode, output = child_processes_manager.RunSubprocess(
            GetRunCommand(self.args, replay_exe_path), env, timeout=SUBPROCESS_TIMEOUT)
        if returncode == -1:
            cmd = replay_exe_path
            self.results.append(
                GroupedResult(GroupedResult.Crashed, "Replay run crashed (%s)" % cmd, output,
                              tests))
            return
        elif returncode == -2:
            self.results.append(
                GroupedResult(GroupedResult.TimedOut, "Replay run timed out", output, tests))
            return

        # The replay binary prints one "*RESULT <label> <code>" line per test;
        # code 0 means captured and replayed GL state matched.
        output_lines = output.splitlines()
        passes = []
        fails = []
        count = 0
        for output_line in output_lines:
            words = output_line.split(" ")
            if len(words) == 3 and words[0] == RESULT_TAG:
                if int(words[2]) == 0:
                    passes.append(self.FindTestByLabel(words[1]))
                else:
                    fails.append(self.FindTestByLabel(words[1]))
                    self.logger.info("Context comparison failed: {}".format(
                        self.FindTestByLabel(words[1])))
                    self.PrintContextDiff(replay_build_dir, words[1])

                count += 1
        if len(passes) > 0:
            self.results.append(GroupedResult(GroupedResult.Passed, "", output, passes))
        if len(fails) > 0:
            self.results.append(GroupedResult(GroupedResult.Failed, "", output, fails))

    def PrintContextDiff(self, replay_build_dir, test_name):
        frame = 1
        while True:
            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
                                                                 frame)
            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
            if os.path.exists(capture_file) and os.path.exists(replay_file):
                captured_context = open(capture_file, "r").readlines()
                replayed_context = open(replay_file, "r").readlines()
                for line in difflib.unified_diff(
                        captured_context, replayed_context, fromfile=capture_file,
                        tofile=replay_file):
                    print(line, end="")
            else:
                if frame > self.CAPTURE_FRAME_END:
                    break
            frame = frame + 1

    def FindTestByLabel(self, label):
        for test in self.tests:
            if test.GetLabel() == label:
                return test
        return None

    def AddTest(self, test):
        assert len(self.tests) <= self.args.batch_count
        # NOTE(review): Test.__init__ defines `test_index`, but `index` is set here —
        # looks inconsistent; confirm which attribute consumers read.
        test.index = len(self.tests)
        self.tests.append(test)

    # gni file, which holds all the sources for a replay application
    def CreateGNIFile(self, composite_file_id, tests):
        test_list = []
        for test in tests:
            label = test.GetLabel()
            assert (test.context_id > 0)

            fname = "%s%s%d_files.txt" % (label, TRACE_FILE_SUFFIX, test.context_id)
            fpath = os.path.join(self.trace_folder_path, fname)
            with open(fpath) as f:
                files = f.readlines()
                f.close()
            files = ['"%s/%s"' % (self.trace_dir, file.strip()) for file in files]
            angledata = "%s%s.angledata.gz" % (label, TRACE_FILE_SUFFIX)
            test_list += [
                '["%s", %s, [%s], "%s"]' % (label, test.context_id, ','.join(files), angledata)
            ]
        gni_path = os.path.join(self.trace_folder_path, "traces%d.gni" % composite_file_id)
        with open(gni_path, "w") as f:
            f.write("trace_data = [\n%s\n]\n" % ',\n'.join(test_list))
            f.close()

    # header and cpp composite files, which glue the trace files with CaptureReplayTests.cpp
    def CreateTestsCompositeFiles(self, composite_file_id, tests):
        # write CompositeTests header file
        include_header_template = '#include "{header_file_path}.h"\n'
        trace_headers = "".join([
            include_header_template.format(header_file_path=test.GetLabel() + TRACE_FILE_SUFFIX +
                                           str(test.context_id)) for test in tests
        ])

        h_filename = "CompositeTests%d.h" % composite_file_id
        with open(os.path.join(self.trace_folder_path, h_filename), "w") as h_file:
            h_file.write(composite_h_file_template.format(trace_headers=trace_headers))
            h_file.close()

        # write CompositeTests cpp file
        test_trace_info_inits = "".join([
            test_trace_info_init_template.format(namespace=tests[i].GetLabel())
            for i in range(len(tests))
        ])

        cpp_filename = "CompositeTests%d.cpp" % composite_file_id
        with open(os.path.join(self.trace_folder_path, cpp_filename), "w") as cpp_file:
            cpp_file.write(
                composite_cpp_file_template.format(
                    h_filename=h_filename, test_trace_info_inits=test_trace_info_inits))
            cpp_file.close()

    def __str__(self):
        repr_str = "TestBatch:\n"
        for test in self.tests:
            repr_str += ("\t" + str(test) + "\n")
        return repr_str

    def __getitem__(self, index):
        assert index < len(self.tests)
        return self.tests[index]

    def __iter__(self):
        return iter(self.tests)

    def GetResults(self):
        return TestBatchResult(self.results, self.args.verbose)


class TestExpectation():
    """Expectations parsed from capture_replay_expectations.txt."""

    # tests that must not be run as list
    skipped_for_capture_tests = []

    # test expectations for tests that do not pass
    non_pass_results = {}

    flaky_tests = []

    non_pass_re = {}

    # yapf: disable
    # we want each pair on one line
    result_map = { "FAIL" : GroupedResult.Failed,
                   "TIMEOUT" : GroupedResult.TimedOut,
                   "CRASHED" : GroupedResult.Crashed,
                   "COMPILE_FAILED" : GroupedResult.CompileFailed,
                   "SKIPPED_BY_GTEST" : GroupedResult.Skipped,
                   "PASS" : GroupedResult.Passed}
    # yapf: enable

    def __init__(self, args):
        expected_results_filename = "capture_replay_expectations.txt"
        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
        self._asan = args.asan
        with open(expected_results_path, "rt") as f:
            for line in f:
                l = line.strip()
                if l != "" and not l.startswith("#"):
                    self.ReadOneExpectation(l, args.debug)

    def _CheckTagsWithConfig(self, tags, config_tags):
        for tag in tags:
            if tag not in config_tags:
                return False
        return True

    def ReadOneExpectation(self, line, is_debug):
        """Parse one 'TAGS : TestName.* = RESULT' line into the expectation tables."""
        (testpattern, result) = line.split('=')
        (test_info_string, test_name_string) = testpattern.split(':')
        test_name = test_name_string.strip()
        test_info = test_info_string.strip().split()
        result_stripped = result.strip()

        tags = []
        if len(test_info) > 1:
            tags = test_info[1:]

        config_tags = [GetPlatformForSkip()]
        if self._asan:
            config_tags += ['ASAN']
        if is_debug:
            config_tags += ['DEBUG']

        if self._CheckTagsWithConfig(tags, config_tags):
            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
            if result_stripped == 'SKIP_FOR_CAPTURE':
                self.skipped_for_capture_tests.append(test_name_regex)
            elif result_stripped == 'FLAKY':
                self.flaky_tests.append(test_name_regex)
            else:
                self.non_pass_results[test_name] = self.result_map[result_stripped]
                self.non_pass_re[test_name] = test_name_regex

    def TestIsSkippedForCapture(self, test_name):
        for p in self.skipped_for_capture_tests:
            m = p.match(test_name)
            if m is not None:
                return True
        return False

    def Filter(self, test_list, run_all_tests):
        result = {}
        for t in test_list:
            for key in self.non_pass_results.keys():
                if self.non_pass_re[key].match(t) is not None:
                    result[t] = self.non_pass_results[key]
            if run_all_tests:
                for skip in self.skipped_for_capture_tests:
                    if skip.match(t) is not None:
                        result[t] = "'forced skip'"
        return result

    def IsFlaky(self, test_name):
        for flaky in self.flaky_tests:
            if flaky.match(test_name) is not None:
                return True
        return False


def ClearFolderContent(path):
    all_files = []
    for f in os.listdir(path):
        if os.path.isfile(os.path.join(path, f)):
            os.remove(os.path.join(path, f))


def SetCWDToAngleFolder():
    cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
    os.chdir(cwd)
    return cwd


def RunTests(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock):
    """Worker entry point: drain batches from *job_queue*, capture then replay each,
    and append TestBatchResults to *result_list*."""
    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % worker_id)
    replay_exec_path = os.path.join(replay_build_dir, REPLAY_BINARY)

    child_processes_manager = ChildProcessesManager(logger, ninja_lock)
    # used to differentiate between multiple composite files when there are multiple test batchs
    # running on the same worker and --deleted_trace is set to False
    composite_file_id = 1
    while not job_queue.empty():
        try:
            test_batch = job_queue.get()
            message_queue.put("Starting {} tests on worker {}. Unstarted jobs: {}".format(
                len(test_batch.tests), worker_id, job_queue.qsize()))

            test_batch.SetWorkerId(worker_id)

            success = test_batch.RunWithCapture(args, child_processes_manager)
            if not success:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            continued_tests = test_batch.RemoveTestsThatDoNotProduceAppropriateTraceFiles()
            if len(continued_tests) == 0:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            success = test_batch.BuildReplay(replay_build_dir, composite_file_id, continued_tests,
                                             child_processes_manager)
            if args.keep_temp_files:
                composite_file_id += 1
            if not success:
                result_list.append(test_batch.GetResults())
                message_queue.put(str(test_batch.GetResults()))
                continue
            test_batch.RunReplay(replay_build_dir, replay_exec_path, child_processes_manager,
                                 continued_tests)
            result_list.append(test_batch.GetResults())
            message_queue.put(str(test_batch.GetResults()))
        except KeyboardInterrupt:
            child_processes_manager.KillAll()
            raise
        except queue.Empty:
            child_processes_manager.KillAll()
            break
        except Exception as e:
            message_queue.put("RunTestsException: %s\n%s" % (repr(e), traceback.format_exc()))
            child_processes_manager.KillAll()
            pass
    child_processes_manager.KillAll()


def SafeDeleteFolder(folder_name):
    # retry until the folder is gone; rmtree can hit transient PermissionErrors on Windows
    while os.path.isdir(folder_name):
        try:
            shutil.rmtree(folder_name)
        except KeyboardInterrupt:
            raise
        except PermissionError:
            pass


def DeleteReplayBuildFolders(folder_num, replay_build_dir, trace_folder):
    for i in range(folder_num):
        folder_name = replay_build_dir + str(i)
        if os.path.isdir(folder_name):
            SafeDeleteFolder(folder_name)


def CreateTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        os.makedirs(folder_path)


def DeleteTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            SafeDeleteFolder(folder_path)


def GetPlatformForSkip():
    # yapf: disable
    # we want each pair on one line
    platform_map = { "win32" : "WIN",
                     "linux" : "LINUX" }
    # yapf: enable
    return platform_map.get(sys.platform, "UNKNOWN")


def main(args):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level=args.log.upper())

    ninja_lock = multiprocessing.Lock()
    child_processes_manager = ChildProcessesManager(logger, ninja_lock)
    try:
        start_time = time.time()
        # set the number of workers to be cpu_count - 1 (since the main process already takes up a
        # CPU core). Whenever a worker is available, it grabs the next job from the job queue and
        # runs it. The worker closes down when there is no more job.
        worker_count = min(multiprocessing.cpu_count() - 1, args.max_jobs)
        cwd = SetCWDToAngleFolder()

        CreateTraceFolders(worker_count)
        capture_build_dir = os.path.normpath(r"%s/Capture" % args.out_dir)
        returncode, output = child_processes_manager.RunGNGen(args, capture_build_dir, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # run ninja to build all tests
        returncode, output = child_processes_manager.RunNinja(args, capture_build_dir,
                                                              args.test_suite, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # get a list of tests
        test_path = os.path.join(capture_build_dir, args.test_suite)
        test_list = GetTestsListForFilter(args, test_path, args.filter, logger)
        test_expectation = TestExpectation(args)
        test_names = ParseTestNamesFromTestList(test_list, test_expectation,
                                                args.also_run_skipped_for_capture_tests, logger)
        test_expectation_for_list = test_expectation.Filter(
            test_names, args.also_run_skipped_for_capture_tests)
        # objects created by manager can be shared by multiple processes. We use it to create
        # collections that are shared by multiple processes such as job queue or result list.
        manager = multiprocessing.Manager()
        job_queue = manager.Queue()
        test_batch_num = int(math.ceil(len(test_names) / float(args.batch_count)))

        # put the test batchs into the job queue
        for batch_index in range(test_batch_num):
            batch = TestBatch(args, logger)
            test_index = batch_index
            while test_index < len(test_names):
                batch.AddTest(Test(test_names[test_index]))
                test_index += test_batch_num
            job_queue.put(batch)

        passed_count = 0
        failed_count = 0
        timedout_count = 0
        crashed_count = 0
        compile_failed_count = 0
        skipped_count = 0

        unexpected_count = {}
        unexpected_test_results = {}

        for type in GroupedResult.ResultTypes:
            unexpected_count[type] = 0
            unexpected_test_results[type] = []

        # result list is created by manager and can be shared by multiple processes. Each
        # subprocess populates the result list with the results of its test runs. After all
        # subprocesses finish, the main process processes the results in the result list.
        # An item in the result list is a tuple with 3 values (testname, result, output).
        # The "result" can take 3 values "Passed", "Failed", "Skipped". The output is the
        # stdout and the stderr of the test appended together.
        result_list = manager.list()
        message_queue = manager.Queue()
        # so that we do not spawn more processes than we actually need
        worker_count = min(worker_count, test_batch_num)
        # spawning and starting up workers
        for worker_id in range(worker_count):
            proc = multiprocessing.Process(
                target=RunTests,
                args=(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock))
            child_processes_manager.AddWorker(proc)
            proc.start()

        # print out messages from the message queue populated by workers
        # if there is no message, and the elapsed time between now and when the last message is
        # print exceeds TIME_BETWEEN_MESSAGE, prints out a message to signal that tests are still
        # running
        last_message_timestamp = 0
        while child_processes_manager.IsAnyWorkerAlive():
            while not message_queue.empty():
                msg = message_queue.get()
                logger.info(msg)
                last_message_timestamp = time.time()
            cur_time = time.time()
            if cur_time - last_message_timestamp > TIME_BETWEEN_MESSAGE:
                last_message_timestamp = cur_time
                logger.info("Tests are still running. Remaining workers: " + \
                str(child_processes_manager.GetRemainingWorkers()) + \
                ". Unstarted jobs: " + str(job_queue.qsize()))
            time.sleep(1.0)
        child_processes_manager.JoinWorkers()
        while not message_queue.empty():
            msg = message_queue.get()
            logger.warning(msg)
        end_time = time.time()

        # print out results
        logger.info("\n\n\n")
        logger.info("Results:")

        flaky_results = []

        for test_batch in result_list:
            test_batch_result = test_batch.results
            logger.debug(str(test_batch_result))

            passed_count += len(test_batch_result[GroupedResult.Passed])
            failed_count += len(test_batch_result[GroupedResult.Failed])
            timedout_count += len(test_batch_result[GroupedResult.TimedOut])
            crashed_count += len(test_batch_result[GroupedResult.Crashed])
            compile_failed_count += len(test_batch_result[GroupedResult.CompileFailed])
            skipped_count += len(test_batch_result[GroupedResult.Skipped])

            for real_result, test_list in test_batch_result.items():
                for test in test_list:
                    if test_expectation.IsFlaky(test):
                        flaky_results.append("{} ({})".format(test, real_result))
                        continue

                    # Passing tests are not in the list
                    if test not in test_expectation_for_list.keys():
                        if real_result != GroupedResult.Passed:
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                "{} {} (expected Pass or is new test)".format(test, real_result))
                    else:
                        expected_result = test_expectation_for_list[test]
                        if real_result != expected_result:
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                "{} {} (expected {})".format(test, real_result, expected_result))

        logger.info("")
        logger.info("Elapsed time: %.2lf seconds" % (end_time - start_time))
        logger.info("")

        if len(flaky_results):
            logger.info("Flaky test(s):")
            for line in flaky_results:
                logger.info("    {}".format(line))
            logger.info("")

        logger.info(
            "Summary: Passed: %d, Comparison Failed: %d, Crashed: %d, CompileFailed %d, Skipped: %d, Timeout: %d"
            % (passed_count, failed_count, crashed_count, compile_failed_count, skipped_count,
               timedout_count))

        retval = EXIT_SUCCESS

        unexpected_test_results_count = 0
        for count in unexpected_count.values():
            unexpected_test_results_count += count

        if unexpected_test_results_count > 0:
            retval = EXIT_FAILURE
            logger.info("")
            logger.info("Failure: Obtained {} results that differ from expectation:".format(
                unexpected_test_results_count))
            logger.info("")
            for result, count in unexpected_count.items():
                if count > 0:
                    logger.info("Unexpected '{}' ({}):".format(result, count))
                    for test_result in unexpected_test_results[result]:
                        logger.info("    {}".format(test_result))
                    logger.info("")

        logger.info("\n\n")

        # delete generated folders if --keep_temp_files flag is set to false
        if args.purge:
            DeleteTraceFolders(worker_count)
            if os.path.isdir(args.out_dir):
                SafeDeleteFolder(args.out_dir)

        # Try hard to ensure output is finished before ending the test.
        logging.shutdown()
        sys.stdout.flush()
        time.sleep(2.0)
        return retval

    except KeyboardInterrupt:
        child_processes_manager.KillAll()
        return EXIT_FAILURE


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out-dir',
        default=DEFAULT_OUT_DIR,
        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
        % DEFAULT_OUT_DIR)
    # TODO(jmadill): Remove this argument. http://anglebug.com/6102
    parser.add_argument(
        '--use-goma',
        action='store_true',
        help='Use goma for distributed builds. Requires internal access. Off by default.')
    parser.add_argument(
        '-f',
        '--filter',
        '--gtest_filter',
        default=DEFAULT_FILTER,
        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
    parser.add_argument(
        '--test-suite',
        default=DEFAULT_TEST_SUITE,
        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
    # NOTE(review): the source chunk is truncated here — the remaining argument
    # definitions and the parse_args()/main() invocation are not visible in this
    # view and must be restored from the original file.
% DEFAULT_TEST_SUITE)1101parser.add_argument(1102'--batch-count',1103default=DEFAULT_BATCH_COUNT,1104type=int,1105help='Number of tests in a batch. Default is %d.' % DEFAULT_BATCH_COUNT)1106parser.add_argument(1107'--keep-temp-files',1108action='store_true',1109help='Whether to keep the temp files and folders. Off by default')1110parser.add_argument('--purge', help='Purge all build directories on exit.')1111parser.add_argument(1112'--goma-dir',1113default='',1114help='Set custom goma directory. Uses the goma in path by default.')1115parser.add_argument(1116'--output-to-file',1117action='store_true',1118help='Whether to write output to a result file. Off by default')1119parser.add_argument(1120'--result-file',1121default=DEFAULT_RESULT_FILE,1122help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %1123DEFAULT_RESULT_FILE)1124parser.add_argument('-v', "--verbose", action='store_true', help='Shows full test output.')1125parser.add_argument(1126'-l',1127'--log',1128default=DEFAULT_LOG_LEVEL,1129help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)1130parser.add_argument(1131'-j',1132'--max-jobs',1133default=DEFAULT_MAX_JOBS,1134type=int,1135help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)1136parser.add_argument(1137'-a',1138'--also-run-skipped-for-capture-tests',1139action='store_true',1140help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE')11411142# TODO(jmadill): Remove this argument. 
http://anglebug.com/61021143parser.add_argument('--depot-tools-path', default=None, help='Path to depot tools')1144parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')1145parser.add_argument('--asan', action='store_true', help='Build with ASAN.')1146parser.add_argument(1147'--show-capture-stdout', action='store_true', help='Print test stdout during capture.')1148parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')1149args = parser.parse_args()1150if args.debug and (args.out_dir == DEFAULT_OUT_DIR):1151args.out_dir = args.out_dir + "Debug"11521153if sys.platform == "win32":1154args.test_suite += ".exe"1155if args.output_to_file:1156logging.basicConfig(level=args.log.upper(), filename=args.result_file)1157else:1158logging.basicConfig(level=args.log.upper())11591160sys.exit(main(args))116111621163