#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Script testing capture_replay with angle_end2end_tests
"""

# Automation script will:
# 1. Build all tests in angle_end2end with frame capture enabled
# 2. Run each test with frame capture
# 3. Build CaptureReplayTest with cpp trace files
# 4. Run CaptureReplayTest
# 5. Output the number of test successes and failures. A test succeeds if no error occurs
#    during its capture and replay, and the GL state at the end of the two runs matches.
#    Any unexpected failure results in a non-zero exit code.

# Run this script with Python to test capture replay on angle_end2end tests
# python path/to/capture_replay_tests.py
# Command line arguments: run with --help for a full list.
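#
# For example (the flag values here are illustrative, not defaults):
# vpython3 src/tests/capture_replay_tests.py \
#     --gtest_filter '*/ES2_Vulkan_SwiftShader' --max-jobs 4 --xvfb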

import argparse
import concurrent.futures
import contextlib
import difflib
import distutils.util
import getpass
import glob
import json
import logging
import os
import pathlib
import queue
import random
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import traceback

SCRIPT_DIR = str(pathlib.Path(__file__).resolve().parent)
PY_UTILS = str(pathlib.Path(SCRIPT_DIR) / 'py_utils')
if PY_UTILS not in sys.path:
    os.stat(PY_UTILS)  # Raise early if py_utils is missing.
    sys.path.insert(0, PY_UTILS)
import angle_test_util

PIPE_STDOUT = True
DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
DEFAULT_TEST_SUITE = "angle_end2end_tests"
REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
DEFAULT_BATCH_COUNT = 1  # number of tests batched together for capture
CAPTURE_FRAME_END = 100
TRACE_FILE_SUFFIX = "_context"  # because we only deal with 1 context right now
RESULT_TAG = "*RESULT"
STATUS_MESSAGE_PERIOD = 20  # in seconds
CAPTURE_SUBPROCESS_TIMEOUT = 600  # in seconds
REPLAY_SUBPROCESS_TIMEOUT = 60  # in seconds
DEFAULT_RESULT_FILE = "results.txt"
DEFAULT_LOG_LEVEL = "info"
DEFAULT_MAX_JOBS = 8
REPLAY_BINARY = "capture_replay_tests"
if sys.platform == "win32":
    REPLAY_BINARY += ".exe"
TRACE_FOLDER = "traces"

EXIT_SUCCESS = 0
EXIT_FAILURE = 1
REPLAY_INITIALIZATION_FAILURE = -1
REPLAY_SERIALIZATION_FAILURE = -2

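# C++ switch/case templates, presumably for generated replay dispatch code.
# Note that nothing else in this script references them.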
switch_case_without_return_template = """\
        case {case}:
            {namespace}::{call}({params});
            break;
"""

switch_case_with_return_template = """\
        case {case}:
            return {namespace}::{call}({params});
"""

default_case_without_return_template = """\
        default:
            break;"""
default_case_with_return_template = """\
        default:
            return {default_val};"""


def winext(name, ext):
    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name


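# depot_tools entry points, relative to the ANGLE root (main() changes the
# working directory there); on Windows, gn is invoked via its .bat wrapper.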
GN_PATH = os.path.join('third_party', 'depot_tools', winext('gn', 'bat'))
AUTONINJA_PATH = os.path.join('third_party', 'depot_tools', 'autoninja.py')


def GetGnArgsStr(args, extra_gn_args=[]):
    gn_args = [('angle_with_capture_by_default', 'true'),
               ('angle_enable_vulkan_api_dump_layer', 'false'),
               ('angle_enable_wgpu', 'false')] + extra_gn_args
    if args.use_reclient:
        gn_args.append(('use_remoteexec', 'true'))
    if not args.debug:
        gn_args.append(('is_debug', 'false'))
        gn_args.append(('symbol_level', '1'))
        gn_args.append(('angle_assert_always_on', 'true'))
    if args.asan:
        gn_args.append(('is_asan', 'true'))
    return ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])


class XvfbPool(object):

    def __init__(self, worker_count):
        self.queue = queue.Queue()

        self.processes = []
        displays = set()
        tmp = tempfile.TemporaryDirectory()

        logging.info('Starting xvfb and openbox...')
        # Based on the simplest case from testing/xvfb.py, with tweaks to minimize races.
        try:
            for worker in range(worker_count):
                while True:
                    # Pick a set of random displays from a custom range to hopefully avoid
                    # collisions with anything else that might be using xvfb.
                    # Another option would be -displayfd but that has its quirks too.
                    display = random.randint(7700000, 7800000)
                    if display in displays:
                        continue

                    x11_display_file = '/tmp/.X11-unix/X%d' % display

                    if not os.path.exists(x11_display_file):
                        break

                displays.add(display)

                x11_proc = subprocess.Popen([
                    'Xvfb',
                    ':%d' % display, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp',
                    '-dpi', '96', '+extension', 'RANDR', '-maxclients', '512'
                ],
                                            stderr=subprocess.STDOUT)
                self.processes.append(x11_proc)

                start_time = time.time()
                while not os.path.exists(x11_display_file):
                    if time.time() - start_time >= 30:
                        raise Exception('X11 failed to start')
                    time.sleep(0.1)

                env = os.environ.copy()
                env['DISPLAY'] = ':%d' % display

                # testing/xvfb.py uses signals instead, which is tricky with multiple displays.
                openbox_ready_file = os.path.join(tmp.name, str(display))
                openbox_proc = subprocess.Popen(
                    ['openbox', '--sm-disable', '--startup',
                     'touch %s' % openbox_ready_file],
                    stderr=subprocess.STDOUT,
                    env=env)
                self.processes.append(openbox_proc)

                start_time = time.time()
                while not os.path.exists(openbox_ready_file):
                    if time.time() - start_time >= 30:
                        raise Exception('Openbox failed to start')
                    time.sleep(0.1)

                self.queue.put(display)

            logging.info('Started a pool of %d xvfb displays: %s', worker_count,
                         ' '.join(str(d) for d in sorted(displays)))
        except Exception:
            self.Teardown()
            raise
        finally:
            tmp.cleanup()

    def GrabDisplay(self):
        return self.queue.get()

    def ReleaseDisplay(self, display):
        self.queue.put(display)

    def Teardown(self):
        logging.info('Stopping xvfb pool')
        for p in reversed(self.processes):
            p.kill()
            p.wait()
        self.processes = []


@contextlib.contextmanager
def MaybeXvfbPool(xvfb, worker_count):
    if xvfb:
        # Construct the pool before entering the try block: XvfbPool.__init__
        # cleans up after itself on failure, so Teardown() must only run once
        # the pool actually exists.
        xvfb_pool = XvfbPool(worker_count)
        try:
            yield xvfb_pool
        finally:
            xvfb_pool.Teardown()
    else:
        yield None


@contextlib.contextmanager
def GetDisplayEnv(env, xvfb_pool):
    if not xvfb_pool:
        yield env
        return

    display = xvfb_pool.GrabDisplay()
    display_var = ':%d' % display
    try:
        yield {**env, 'DISPLAY': display_var, 'XVFB_DISPLAY': display_var}
    finally:
        xvfb_pool.ReleaseDisplay(display)


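# Turns a GTest name such as "FooTest.Bar/ES2_Vulkan" into a label that is
# safe to use in file names and C identifiers.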
def TestLabel(test_name):
    return test_name.replace(".", "_").replace("/", "_")


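# The test binary's --list-tests output brackets the test names between
# "Tests list:" and "End tests list." lines, one name per line.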
def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests):
    output_lines = output.splitlines()
    tests = []
    seen_start_of_tests = False
    disabled = 0
    for line in output_lines:
        l = line.strip()
        if l == 'Tests list:':
            seen_start_of_tests = True
        elif l == 'End tests list.':
            break
        elif not seen_start_of_tests:
            pass
        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
            tests.append(l)
        else:
            disabled += 1

    logging.info('Found %d tests and %d disabled tests.', len(tests), disabled)
    return tests


class GroupedResult():
    Passed = "Pass"
    Failed = "Fail"
    TimedOut = "Timeout"
    CompileFailed = "CompileFailed"
    CaptureFailed = "CaptureFailed"
    ReplayFailed = "ReplayFailed"
    Skipped = "Skipped"
    FailedToTrace = "FailedToTrace"

    ResultTypes = [
        Passed, Failed, TimedOut, CompileFailed, CaptureFailed, ReplayFailed, Skipped,
        FailedToTrace
    ]

    def __init__(self, resultcode, message, output, tests):
        self.resultcode = resultcode
        self.message = message
        self.output = output
        self.tests = list(tests)


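# A capture is complete once it has produced, per test, at least one frame
# source file, one context header, one context source, and exactly one
# source JSON.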
def CaptureProducedRequiredFiles(all_trace_files, test_name):
    label = TestLabel(test_name)

    test_files = [f for f in all_trace_files if f.startswith(label)]

    frame_files_count = 0
    context_header_count = 0
    context_source_count = 0
    source_json_count = 0
    context_id = 0
    for f in test_files:
        # TODO: Consolidate. http://anglebug.com/42266223
        if "_001.cpp" in f or "_001.c" in f:
            frame_files_count += 1
        elif f.endswith(".json"):
            source_json_count += 1
        elif f.endswith(".h"):
            context_header_count += 1
            if TRACE_FILE_SUFFIX in f:
                context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
                context_id = int(context)
        # TODO: Consolidate. http://anglebug.com/42266223
        elif f.endswith(".cpp") or f.endswith(".c"):
            context_source_count += 1
    got_all_files = (
        frame_files_count >= 1 and context_header_count >= 1 and context_source_count >= 1 and
        source_json_count == 1)
    return got_all_files


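# Environment variables that drive ANGLE's frame capture. Serialization is
# enabled so that the GL state of the capture and replay runs can be compared.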
def GetCaptureEnv(args, trace_folder_path):
    env = {
        'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
        'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
        'ANGLE_FEATURE_OVERRIDES_DISABLED': 'supportsHostImageCopy',
        'ANGLE_CAPTURE_ENABLED': '1',
        'ANGLE_CAPTURE_OUT_DIR': trace_folder_path,
    }

    if args.mec > 0:
        env['ANGLE_CAPTURE_FRAME_START'] = '{}'.format(args.mec)
        env['ANGLE_CAPTURE_FRAME_END'] = '{}'.format(args.mec + 1)
    else:
        env['ANGLE_CAPTURE_FRAME_END'] = '{}'.format(CAPTURE_FRAME_END)

    if args.expose_nonconformant_features:
        env['ANGLE_FEATURE_OVERRIDES_ENABLED'] += ':exposeNonConformantExtensionsAndVersions'

    return env


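# Prints a unified diff of the captured vs. replayed context-serialization
# JSON for every frame that produced both files.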
def PrintContextDiff(replay_build_dir, test_name):
    frame = 1
    found = False
    while True:
        capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name, frame)
        replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
        if os.path.exists(capture_file) and os.path.exists(replay_file):
            found = True
            with open(capture_file, "r") as f:
                captured_context = f.readlines()
            with open(replay_file, "r") as f:
                replayed_context = f.readlines()
            for line in difflib.unified_diff(
                    captured_context, replayed_context, fromfile=capture_file, tofile=replay_file):
                print(line, end="")
        else:
            if frame > CAPTURE_FRAME_END:
                break
        frame = frame + 1
    if not found:
        logging.error('Could not find serialization diff files for %s' % test_name)


def UnlinkContextStateJsonFilesIfPresent(replay_build_dir):
    for f in glob.glob(os.path.join(replay_build_dir, '*_ContextCaptured*.json')):
        os.unlink(f)
    for f in glob.glob(os.path.join(replay_build_dir, '*_ContextReplayed*.json')):
        os.unlink(f)


class TestExpectation():
    # Tests that must not be run during capture, keyed by test name pattern.
    skipped_for_capture_tests = {}
    skipped_for_capture_tests_re = {}

    # Test expectations for tests that do not pass.
    non_pass_results = {}

    # COMPILE_FAIL
    compile_fail_tests = {}
    compile_fail_tests_re = {}

    flaky_tests = []

    non_pass_re = {}

    result_map = {
        "FAIL": GroupedResult.Failed,
        "TIMEOUT": GroupedResult.TimedOut,
        "COMPILE_FAIL": GroupedResult.CompileFailed,
        "NOT_RUN": GroupedResult.Skipped,
        "SKIP_FOR_CAPTURE": GroupedResult.Skipped,
        "PASS": GroupedResult.Passed,
    }

    def __init__(self, args):
        expected_results_filename = "capture_replay_expectations.txt"
        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
        self._asan = args.asan
        with open(expected_results_path, "rt") as f:
            for line in f:
                l = line.strip()
                if l != "" and not l.startswith("#"):
                    self.ReadOneExpectation(l, args.debug)

    def _CheckTagsWithConfig(self, tags, config_tags):
        for tag in tags:
            if tag not in config_tags:
                return False
        return True

    def ReadOneExpectation(self, line, is_debug):
        (testpattern, result) = line.split('=')
        (test_info_string, test_name_string) = testpattern.split(':')
        test_name = test_name_string.strip()
        test_info = test_info_string.strip().split()
        result_stripped = result.strip()

        tags = []
        if len(test_info) > 1:
            tags = test_info[1:]

        config_tags = [GetPlatformForSkip()]
        if self._asan:
            config_tags += ['ASAN']
        if is_debug:
            config_tags += ['DEBUG']

        if self._CheckTagsWithConfig(tags, config_tags):
            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
            if result_stripped == 'COMPILE_FAIL':
                self.compile_fail_tests[test_name] = self.result_map[result_stripped]
                self.compile_fail_tests_re[test_name] = test_name_regex
            if result_stripped == 'SKIP_FOR_CAPTURE' or result_stripped == 'TIMEOUT':
                self.skipped_for_capture_tests[test_name] = self.result_map[result_stripped]
                self.skipped_for_capture_tests_re[test_name] = test_name_regex
            elif result_stripped == 'FLAKY':
                self.flaky_tests.append(test_name_regex)
            else:
                self.non_pass_results[test_name] = self.result_map[result_stripped]
                self.non_pass_re[test_name] = test_name_regex

    def TestIsSkippedForCapture(self, test_name):
        return any(p.match(test_name) for p in self.skipped_for_capture_tests_re.values())

    def TestIsCompileFail(self, test_name):
        return any(p.match(test_name) for p in self.compile_fail_tests_re.values())

    def Filter(self, test_list, run_all_tests):
        result = {}
        for t in test_list:
            for key in self.non_pass_results.keys():
                if self.non_pass_re[key].match(t) is not None:
                    result[t] = self.non_pass_results[key]
            for key in self.compile_fail_tests.keys():
                if self.compile_fail_tests_re[key].match(t) is not None:
                    result[t] = self.compile_fail_tests[key]
            if run_all_tests:
                for key, r in self.skipped_for_capture_tests.items():
                    if self.skipped_for_capture_tests_re[key].match(t) is not None:
                        result[t] = r
        return result

    def IsFlaky(self, test_name):
        for flaky in self.flaky_tests:
            if flaky.match(test_name) is not None:
                return True
        return False


def GetPlatformForSkip():
    # yapf: disable
    # we want each pair on one line
    platform_map = { 'win32' : 'WIN',
                     'linux' : 'LINUX' }
    # yapf: enable
    return platform_map.get(sys.platform, 'UNKNOWN')


def RunInParallel(f, lst, max_workers, stop_event):
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_arg = {executor.submit(f, arg): arg for arg in lst}
        try:
            for future in concurrent.futures.as_completed(future_to_arg):
                yield future, future_to_arg[future]
        except KeyboardInterrupt:
            stop_event.set()
            raise


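# Runs cmd, reading its stdout on a helper thread. Returns (rc, stdout): rc is
# the subprocess.TimeoutExpired class itself on timeout (a sentinel callers
# compare against), or None if stop_event was set while the process ran.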
def RunProcess(cmd, env, xvfb_pool, stop_event, timeout):
    stdout = [None]

    def _Reader(process):
        stdout[0] = process.stdout.read().decode()

    with GetDisplayEnv(env, xvfb_pool) as run_env:
        process = subprocess.Popen(
            cmd, env=run_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        t = threading.Thread(target=_Reader, args=(process,))
        t.start()
        time_start = time.time()

        while True:
            time.sleep(0.1)
            if process.poll() is not None:
                t.join()
                return process.returncode, stdout[0]
            if timeout is not None and time.time() - time_start > timeout:
                process.kill()
                t.join()
                return subprocess.TimeoutExpired, stdout[0]
            if stop_event.is_set():
                process.kill()
                t.join()
                return None, stdout[0]


def ReturnCodeWithNote(rc):
    s = 'rc=%s' % rc
    if sys.platform.startswith('linux'):
        if rc == -9:
            # OOM killer sends SIGKILL to the process, return code is -signal
            s += ' SIGKILL possibly due to OOM'
    return s


def RunCaptureInParallel(args, trace_folder_path, test_names, worker_count, xvfb_pool):
    n = args.batch_count
    test_batches = [test_names[i:i + n] for i in range(0, len(test_names), n)]

    extra_env = GetCaptureEnv(args, trace_folder_path)
    env = {**os.environ.copy(), **extra_env}
    test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)

    stop_event = threading.Event()

    def _RunCapture(tests):
        filt = ':'.join(tests)

        results_file = tempfile.mktemp()
        cmd = [
            test_exe_path,
            '--gtest_filter=%s' % filt,
            '--angle-per-test-capture-label',
            '--results-file=' + results_file,
        ]

        # Add --use-config to avoid registering all test configurations
        configs = set([t.split('/')[-1] for t in filt.split(':')])
        if len(configs) == 1:
            config, = configs
            if '*' not in config:
                cmd.append('--use-config=%s' % config)

        test_results = None
        try:
            rc, stdout = RunProcess(cmd, env, xvfb_pool, stop_event, CAPTURE_SUBPROCESS_TIMEOUT)
            if rc == 0:
                with open(results_file) as f:
                    test_results = json.load(f)
        finally:
            try:
                os.unlink(results_file)
            except Exception:
                pass

        return rc, test_results, stdout

    skipped_by_suite = set()
    capture_failed = False
    for (future, tests) in RunInParallel(_RunCapture, test_batches, worker_count, stop_event):
        rc, test_results, stdout = future.result()

        if rc == subprocess.TimeoutExpired:
            logging.error('Capture failed - timed out after %ss\nTests: %s\nPartial stdout:\n%s',
                          CAPTURE_SUBPROCESS_TIMEOUT, ':'.join(tests), stdout)
            capture_failed = True
            continue

        if rc != 0:
            logging.error('Capture failed (%s)\nTests: %s\nStdout:\n%s\n',
                          ReturnCodeWithNote(rc), ':'.join(tests), stdout)
            capture_failed = True
            continue

        if args.show_capture_stdout:
            logging.info('Capture test stdout:\n%s\n', stdout)

        for test_name, res in test_results['tests'].items():
            if res['actual'] == 'SKIP':
                skipped_by_suite.add(test_name)

    return not capture_failed, skipped_by_suite


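# Replays each captured test and scans its stdout for "*RESULT <label> <code>"
# lines (RESULT_TAG); code 0 means the serialized context state matched.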
def RunReplayTestsInParallel(args, replay_build_dir, replay_tests, expected_results,
                             labels_to_tests, worker_count, xvfb_pool):
    extra_env = {}
    if args.expose_nonconformant_features:
        extra_env['ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'exposeNonConformantExtensionsAndVersions'
    env = {**os.environ.copy(), **extra_env}

    stop_event = threading.Event()

    def _RunReplay(test):
        replay_exe_path = os.path.join(replay_build_dir, REPLAY_BINARY)
        cmd = [replay_exe_path, TestLabel(test)]
        return RunProcess(cmd, env, xvfb_pool, stop_event, REPLAY_SUBPROCESS_TIMEOUT)

    replay_failed = False
    for (future, test) in RunInParallel(_RunReplay, replay_tests, worker_count, stop_event):
        expected_to_pass = expected_results[test] == GroupedResult.Passed
        rc, stdout = future.result()
        if rc == subprocess.TimeoutExpired:
            if expected_to_pass:
                logging.error('Replay failed - timed out after %ss\nTest: %s\nPartial stdout:\n%s',
                              REPLAY_SUBPROCESS_TIMEOUT, test, stdout)
                replay_failed = True
            else:
                logging.info('Ignoring replay timeout due to expectation: %s [expected %s]', test,
                             expected_results[test])
            continue

        if rc != 0:
            if expected_to_pass:
                logging.error('Replay failed (%s)\nTest: %s\nStdout:\n%s\n',
                              ReturnCodeWithNote(rc), test, stdout)
                replay_failed = True
            else:
                logging.info('Ignoring replay failure due to expectation: %s [expected %s]', test,
                             expected_results[test])
            continue

        if args.show_replay_stdout:
            logging.info('Replay test stdout:\n%s\n', stdout)

        output_lines = stdout.splitlines()
        for output_line in output_lines:
            words = output_line.split(" ")
            if len(words) == 3 and words[0] == RESULT_TAG:
                test_name = labels_to_tests[words[1]]
                result = int(words[2])

                if result == 0:
                    pass
                elif result == REPLAY_INITIALIZATION_FAILURE:
                    if expected_to_pass:
                        replay_failed = True
                        logging.error('Replay failed. Initialization failure: %s' % test_name)
                    else:
                        logging.info(
                            'Ignoring replay failure due to expectation: %s [expected %s]', test,
                            expected_results[test])
                elif result == REPLAY_SERIALIZATION_FAILURE:
                    if expected_to_pass:
                        replay_failed = True
                        logging.error('Replay failed. Context comparison failed: %s' % test_name)
                        PrintContextDiff(replay_build_dir, words[1])
                    else:
                        logging.info(
                            'Ignoring replay context diff due to expectation: %s [expected %s]',
                            test, expected_results[test])
                else:
                    replay_failed = True
                    logging.error('Replay failed. Unknown result code: %s -> %d' %
                                  (test_name, result))

    return not replay_failed


def CleanupAfterReplay(replay_build_dir, test_labels):
    # Remove files that have test labels in the file name, e.g.:
    # ClearTest_ClearIsClamped_ES2_Vulkan_SwiftShader.dll.pdb
    for build_file in os.listdir(replay_build_dir):
        if any(label in build_file for label in test_labels):
            os.unlink(os.path.join(replay_build_dir, build_file))


def main(args):
    angle_test_util.SetupLogging(args.log.upper())

    # Set cwd to ANGLE root
    os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

    if getpass.getuser() == 'chrome-bot':
        # bots need different re-client auth settings than developers b/319246651
        os.environ["RBE_use_gce_credentials"] = "true"
        os.environ["RBE_use_application_default_credentials"] = "false"
        os.environ["RBE_automatic_auth"] = "false"
        os.environ["RBE_experimental_credentials_helper"] = ""
        os.environ["RBE_experimental_credentials_helper_args"] = ""

    trace_dir = "%s%d" % (TRACE_FOLDER, 0)
    trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, trace_dir)
    if os.path.exists(trace_folder_path):
        shutil.rmtree(trace_folder_path)
    os.makedirs(trace_folder_path)

    capture_build_dir = os.path.join(args.out_dir, 'Capture')
    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % 0)

    logging.info('Building capture tests')

    subprocess.check_call([GN_PATH, 'gen', '--args=%s' % GetGnArgsStr(args), capture_build_dir])
    subprocess.check_call(
        [sys.executable, AUTONINJA_PATH, '-C', capture_build_dir, args.test_suite])

    with MaybeXvfbPool(args.xvfb, 1) as xvfb_pool:
        logging.info('Getting test list')
        test_path = os.path.join(capture_build_dir, args.test_suite)
        with GetDisplayEnv(os.environ, xvfb_pool) as env:
            test_list = subprocess.check_output(
                [test_path, "--list-tests",
                 "--gtest_filter=%s" % args.filter], env=env, text=True)

    test_expectation = TestExpectation(args)
    test_names = ParseTestNamesFromTestList(test_list, test_expectation,
                                            args.also_run_skipped_for_capture_tests)
    test_expectation_for_list = test_expectation.Filter(test_names,
                                                        args.also_run_skipped_for_capture_tests)

    test_names = [
        t for t in test_names if (not test_expectation.TestIsCompileFail(t) and
                                  not test_expectation.TestIsSkippedForCapture(t))
    ]

    if not test_names:
        logging.error('No capture tests to run. Is everything skipped?')
        return EXIT_FAILURE

    worker_count = min(args.max_jobs, os.cpu_count(), 1 + len(test_names) // 10)

    logging.info('Running %d capture tests, worker_count=%d batch_count=%d', len(test_names),
                 worker_count, args.batch_count)

    with MaybeXvfbPool(args.xvfb, worker_count) as xvfb_pool:
        success, skipped_by_suite = RunCaptureInParallel(args, trace_folder_path, test_names,
                                                         worker_count, xvfb_pool)
        if not success:
            logging.error('Capture tests failed, see "Capture failed" errors above')
            return EXIT_FAILURE

        logging.info('RunCaptureInParallel finished')

        labels_to_tests = {TestLabel(t): t for t in test_names}

        all_trace_files = [f.name for f in os.scandir(trace_folder_path) if f.is_file()]

        replay_tests = []
        failed = False
        for test_name in test_names:
            if test_name not in skipped_by_suite:
                if CaptureProducedRequiredFiles(all_trace_files, test_name):
                    replay_tests.append(test_name)
                else:
                    logging.error('Capture failed: test missing replay files: %s', test_name)
                    failed = True

        if failed:
            logging.error('Capture tests failed, see "Capture failed" errors above')
            return EXIT_FAILURE

        logging.info('CaptureProducedRequiredFiles finished')

        composite_file_id = 1
        names_path = os.path.join(trace_folder_path, 'test_names_%d.json' % composite_file_id)
        with open(names_path, 'w') as f:
            f.write(json.dumps({'traces': [TestLabel(t) for t in replay_tests]}))

        replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % 0)
        UnlinkContextStateJsonFilesIfPresent(replay_build_dir)

        logging.info('Building replay tests')

        extra_gn_args = [('angle_build_capture_replay_tests', 'true'),
                         ('angle_capture_replay_test_trace_dir', '"%s"' % trace_dir),
                         ('angle_capture_replay_composite_file_id', str(composite_file_id))]
        subprocess.check_call(
            [GN_PATH, 'gen',
             '--args=%s' % GetGnArgsStr(args, extra_gn_args), replay_build_dir])
        subprocess.check_call(
            [sys.executable, AUTONINJA_PATH, '-C', replay_build_dir, REPLAY_BINARY])

        if not replay_tests:
            logging.error('No replay tests to run. Is everything skipped?')
            return EXIT_FAILURE
        logging.info('Running %d replay tests', len(replay_tests))

        expected_results = {}
        for test in replay_tests:
            expected_result = test_expectation_for_list.get(test, GroupedResult.Passed)
            if test_expectation.IsFlaky(test):
                expected_result = 'Flaky'
            expected_results[test] = expected_result

        if not RunReplayTestsInParallel(args, replay_build_dir, replay_tests, expected_results,
                                        labels_to_tests, worker_count, xvfb_pool):
            logging.error('Replay tests failed, see "Replay failed" errors above')
            return EXIT_FAILURE

        logging.info('Replay tests finished successfully')

    if not args.keep_temp_files:
        CleanupAfterReplay(replay_build_dir, list(labels_to_tests.keys()))
        shutil.rmtree(trace_folder_path)

    return EXIT_SUCCESS


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out-dir',
        default=DEFAULT_OUT_DIR,
        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
        % DEFAULT_OUT_DIR)
    parser.add_argument(
        '-f',
        '--filter',
        '--gtest_filter',
        default=DEFAULT_FILTER,
        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
    parser.add_argument(
        '--test-suite',
        default=DEFAULT_TEST_SUITE,
        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--batch-count',
        default=DEFAULT_BATCH_COUNT,
        type=int,
        help='Number of tests in a (capture) batch. Default is %d.' % DEFAULT_BATCH_COUNT)
    parser.add_argument(
        '--keep-temp-files',
        action='store_true',
        help='Whether to keep the temp files and folders. Off by default')
    parser.add_argument(
        '--use-reclient',
        default=False,
        action='store_true',
        help='Set use_remoteexec=true in args.gn.')
    parser.add_argument(
        '--result-file',
        default=DEFAULT_RESULT_FILE,
        help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %
        DEFAULT_RESULT_FILE)
    parser.add_argument('-v', '--verbose', action='store_true', help='Shows full test output.')
    parser.add_argument(
        '-l',
        '--log',
        default=DEFAULT_LOG_LEVEL,
        help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '-j',
        '--max-jobs',
        default=DEFAULT_MAX_JOBS,
        type=int,
        help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)
    parser.add_argument(
        '-M',
        '--mec',
        default=0,
        type=int,
        help='Enable mid-execution capture starting at the specified frame (default: 0 = regular capture).'
    )
    parser.add_argument(
        '-a',
        '--also-run-skipped-for-capture-tests',
        action='store_true',
        help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE')
    parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')
    parser.add_argument('--asan', action='store_true', help='Build with ASAN.')
    parser.add_argument(
        '-E',
        '--expose-nonconformant-features',
        action='store_true',
        help='Expose non-conformant features to advertise GLES 3.2')
    parser.add_argument(
        '--show-capture-stdout', action='store_true', help='Print test stdout during capture.')
    parser.add_argument(
        '--show-replay-stdout', action='store_true', help='Print test stdout during replay.')
    parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')
    args = parser.parse_args()
    if args.debug and (args.out_dir == DEFAULT_OUT_DIR):
        args.out_dir = args.out_dir + "Debug"

    if sys.platform == "win32":
        args.test_suite += ".exe"

    sys.exit(main(args))