xref: /aosp_15_r20/art/test/testrunner/testrunner.py (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1#!/usr/bin/env python3
2#
3# [VPYTHON:BEGIN]
4# python_version: "3.8"
5# [VPYTHON:END]
6#
7# Copyright 2017, The Android Open Source Project
8#
9# Licensed under the Apache License, Version 2.0 (the "License");
10# you may not use this file except in compliance with the License.
11# You may obtain a copy of the License at
12#
13#     http://www.apache.org/licenses/LICENSE-2.0
14#
15# Unless required by applicable law or agreed to in writing, software
16# distributed under the License is distributed on an "AS IS" BASIS,
17# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18# See the License for the specific language governing permissions and
19# limitations under the License.
20
21"""ART Run-Test TestRunner
22
23The testrunner runs the ART run-tests by simply invoking the script.
24It fetches the list of eligible tests from art/test directory, and list of
25disabled tests from art/test/knownfailures.json. It runs the tests by
26invoking art/test/run-test script and checks the exit value to decide if the
27test passed or failed.
28
29Before invoking the script, first build all the tests dependencies.
30There are two major build targets for building target and host tests
31dependencies:
321) test-art-host-run-test
332) test-art-target-run-test
34
35There are various options to invoke the script which are:
36-t: Either the test name as in art/test or the test name including the variant
37    information. Eg, "-t 001-HelloWorld",
38    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-001-HelloWorld32"
39-j: Number of thread workers to be used. Eg - "-j64"
40--dry-run: Instead of running the test name, just print its name.
41--verbose
42-b / --build-dependencies: to build the dependencies before running the test
43
44To specify any specific variants for the test, use --<<variant-name>>.
45For eg, for compiler type as optimizing, use --optimizing.
46
47
48In the end, the script will print the failed and skipped tests if any.
49
50"""
51import argparse
52import collections
53
54# b/140161314 diagnostics.
55try:
56  import concurrent.futures
57except Exception:
58  import sys
59  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
60  sys.stdout.flush()
61  raise
62
63import csv
64import datetime
65import fnmatch
66import itertools
67import json
68import multiprocessing
69import os
70import re
71import shlex
72import shutil
73import signal
74import subprocess
75import sys
76import tempfile
77import threading
78import time
79
80import env
81from target_config import target_config
82from device_config import device_config
83from typing import Dict, Set, List
84from functools import lru_cache
85from pathlib import Path
86
# TODO: make it adjustable per tests and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# that has key as the test name (like 001-HelloWorld), and value as set of
# variants that the test is disabled for. Populated by gather_disabled_test_info().
DISABLED_TEST_CONTAINER: Dict[str, Set[str]] = {}

# The Dict contains the list of all possible variants for a given type. For example,
# for key TARGET, the value would be target and host. The list is used to parse
# the test name given as the argument to run. Populated by gather_test_info().
VARIANT_TYPE_DICT: Dict[str, Set[str]] = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type (flattened VARIANT_TYPE_DICT).
TOTAL_VARIANTS_SET: Set[str] = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on. They are reset to empty strings in setup_test_env() when stdout
# is not a tty.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains the list of all the possible run tests that are in art/test
# directory (names starting with a digit, e.g. 001-HelloWorld).
RUN_TEST_SET: Set[str] = set()

# Accumulated results: failed_tests holds (test_name, failure_info) tuples,
# skipped_tests holds test names.
failed_tests = []
skipped_tests = []

# Flags (set from command-line arguments / environment before running).
n_thread = 0          # number of worker threads; 0 means "auto-detect" in setup_test_env()
total_test_count = 0  # computed in run_tests() from tests x variant combinations
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
dump_cfg = ''
gdb_dex2oat = False
gdb_dex2oat_args = ''
csv_result = None     # open file object for CSV output, or None when disabled
csv_writer = None     # csv.writer over csv_result, or None when disabled
runtime_option = ''
with_agent: List[str] = []
run_test_option: List[str] = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments: Dict[str, List[str]] = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants: collections.defaultdict = collections.defaultdict(set)
160
161
class ChildProcessTracker(object):
  """Keeps track of forked child processes to be able to kill them.

  Thread-safe: run_tests() runs tests on a thread pool, and each worker
  registers its child via wait() so that kill_all() (invoked on
  KeyboardInterrupt) can terminate everything. Children are killed with
  os.killpg, i.e. by process group — run_test() starts them with
  start_new_session=True so each child leads its own group.
  """

  def __init__(self):
    self.procs = {}             # dict from pid to subprocess.Popen object
    self.mutex = threading.Lock()

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is not None:
        self.procs[proc.pid] = proc
      else:
        os.killpg(proc.pid, signal.SIGKILL) # kill_all has already been called.
    try:
      # May raise subprocess.TimeoutExpired; the finally below still
      # unregisters the pid in that case.
      output = proc.communicate(timeout=timeout)[0]
      return_value = proc.wait()
      return output, return_value
    finally:
      with self.mutex:
        # self.procs becomes None once kill_all() ran; nothing to remove then.
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      self.procs = None # Make future wait() calls kill their processes immediately.
198
# Module-level singleton: run_test() registers children here and the
# KeyboardInterrupt handler in run_tests() uses it to kill them all.
child_process_tracker = ChildProcessTracker()
200
def setup_csv_result():
  """Set up the CSV output if required.

  Creates a csv.writer over the already-opened csv_result file object and
  emits the header row. A no-op when CSV output was not requested
  (csv_result is None) — the same guard that send_csv_result() and
  close_csv_file() already apply, so calling this unconditionally is safe.
  """
  global csv_writer
  if csv_result is None:
    return  # CSV output not requested; leave csv_writer unset.
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'test', 'address_size', 'result'])
208
209
def send_csv_result(test, result):
  """Append one result row (test name columns + result) to the CSV output.

  Does nothing when CSV output was not requested (csv_writer is None).
  """
  if csv_writer is None:
    return
  row = extract_test_name(test) + [result]
  csv_writer.writerow(row)
216
def close_csv_file():
  """Flush and close the CSV results file, if one was opened.

  Resets both csv_writer and csv_result to None so later calls are no-ops.
  """
  global csv_result, csv_writer
  if csv_result is None:
    return
  csv_writer = None
  csv_result.flush()
  csv_result.close()
  csv_result = None
225
def gather_test_info():
  """Populate the global test metadata tables.

  Fills VARIANT_TYPE_DICT with every known variant name grouped by variant
  type, flattens them all into TOTAL_VARIANTS_SET, and records in
  RUN_TEST_SET the name of every run-test found in art/test (directory
  entries whose name starts with a digit).
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT.update({
      'run': {'ndebug', 'debug'},
      'target': {'target', 'host', 'jvm'},
      'trace': {'trace', 'ntrace', 'stream'},
      'image': {'picimage', 'no-image'},
      'debuggable': {'ndebuggable', 'debuggable'},
      'gc': {'gcstress', 'gcverify', 'cms'},
      'prebuild': {'no-prebuild', 'prebuild'},
      'relocate': {'relocate', 'no-relocate'},
      'jni': {'jni', 'forcecopy', 'checkjni'},
      'address_sizes': {'64', '32'},
      'jvmti': {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                'field-stress', 'step-stress'},
      'compiler': {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                   'optimizing', 'speed-profile', 'baseline'},
  })

  # Flatten every variant of every type into one set.
  for variants in VARIANT_TYPE_DICT.values():
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(variants)

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for entry in os.listdir(test_dir):
    if fnmatch.fnmatch(entry, '[0-9]*'):
      RUN_TEST_SET.add(entry)
255
256
def setup_test_env():
  """The method sets default value for the various variants of the tests if they
  are already not set.

  Also decides the worker-thread count (when -j was not given), resolves the
  per-target extra run-test arguments, and disables ANSI colors when stdout
  is not a terminal.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  # NOTE: keys() is a live view of the dict above; capturing it BEFORE the
  # rebinding below means the loop iterates the default key set while (with
  # --all) the VALUES come from the full VARIANT_TYPE_DICT. This is
  # intentional — do not inline this into the loop.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  # address_sizes_target maps 'host'/'target' to the set of address-size
  # suffixes ('32'/'64') to run for that target.
  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    # No explicit --32/--64: use the primary (and optionally 2nd) arch.
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only part of the cores since fully loading the device tends to lead to timeouts.
      fraction = 1.0 if env.ART_TEST_ON_VM else 0.75
      n_thread = max(1, int(get_target_cpu_count() * fraction))
      if device_name == 'fugu':
        n_thread = 1
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    # Not a terminal (e.g. redirected to a file): drop ANSI color codes.
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''
341
def find_extra_device_arguments(target):
  """Return the extra run-test arguments configured for the given device.

  For 'target' the real device name is looked up (via get_device_name());
  'host' and 'jvm' are used verbatim as keys into device_config. Devices
  without an entry get no extra arguments.
  """
  device_name = get_device_name() if target == 'target' else target
  config_entry = device_config.get(device_name, {'run-test-args': []})
  return config_entry['run-test-args']
350
def get_device_name():
  """
  Gets the value of ro.product.name from remote device (unless running on a VM).

  Returns:
    The device/product name, "target" when running from Soong (adb is
    unavailable during build), the VM's `uname -a` string when running on a
    VM, or "UNKNOWN_TARGET" when adb could not determine the device type.
  """
  if env.ART_TEST_RUN_FROM_SOONG:
    return "target"  # We can't use adb during build.
  if env.ART_TEST_ON_VM:
    # Use subprocess.run so the child is reaped. The previous Popen +
    # stdout.read() never called wait() and left a zombie process behind.
    result = subprocess.run(f"{env.ART_SSH_CMD} uname -a".split(),
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    return result.stdout.strip()

  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  output = proc.communicate(timeout = timeout_val)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"
381
def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Builds the common run-test argument list from the module-level flags,
  computes total_test_count, expands the cartesian product of tests x user
  variants, and submits one run_test() call per combination to a thread
  pool. Results are printed as they complete; on a failure (without
  ART_TEST_KEEP_GOING) or on Ctrl-C, pending futures are cancelled.

  Args:
    tests: The set of tests to be run.
  """
  args_all = []

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  # Multiply by the size of every combining variant group ('target' and
  # address sizes are handled separately below).
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  # Each uncombinated target (jvm) contributes exactly one combination.
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  # Translate environment switches and module-level flags into run-test
  # options shared by every combination.
  if env.ART_TEST_WITH_STRACE:
    args_all += ['--strace']

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    args_all += ['--always-clean']

  if env.ART_TEST_BISECTION:
    args_all += ['--bisection-search']

  if gdb:
    args_all += ['--gdb']
    if gdb_arg:
      args_all += ['--gdb-arg', gdb_arg]

  if dump_cfg:
    args_all += ['--dump-cfg', dump_cfg]
  if gdb_dex2oat:
    args_all += ['--gdb-dex2oat']
    if gdb_dex2oat_args:
      args_all += ['--gdb-dex2oat-args', f'{gdb_dex2oat_args}']

  args_all += run_test_option

  if runtime_option:
    for opt in runtime_option:
      args_all += ['--runtime-option', opt]
  if with_agent:
    for opt in with_agent:
      args_all += ['--with-agent', opt]

  if dex2oat_jobs != -1:
    args_all += ['--dex2oat-jobs', str(dex2oat_jobs)]

  def iter_config(tests, input_variants, user_input_variants):
    # Cartesian product of every combining variant group, in the same order
    # that start_combination() unpacks below.
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      # Unpack in the same order iter_config() produced the tuple.
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, address_size}

      args_test = global_options.copy()

      if target == 'host':
        args_test += ['--host']
      elif target == 'jvm':
        args_test += ['--jvm']

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          args_test += ['--chroot', env.ART_TEST_CHROOT]
        if env.ART_TEST_ANDROID_ROOT:
          args_test += ['--android-root', env.ART_TEST_ANDROID_ROOT]
        if env.ART_TEST_ANDROID_I18N_ROOT:
            args_test += ['--android-i18n-root', env.ART_TEST_ANDROID_I18N_ROOT]
        if env.ART_TEST_ANDROID_ART_ROOT:
          args_test += ['--android-art-root', env.ART_TEST_ANDROID_ART_ROOT]
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          args_test += ['--android-tzdata-root', env.ART_TEST_ANDROID_TZDATA_ROOT]

      if run == 'ndebug':
        args_test += ['-O']

      if prebuild == 'prebuild':
        args_test += ['--prebuild']
      elif prebuild == 'no-prebuild':
        args_test += ['--no-prebuild']

      # Map each compiler variant to its run-test flag(s).
      if compiler == 'optimizing':
        args_test += ['--optimizing']
      elif compiler == 'interpreter':
        args_test += ['--interpreter']
      elif compiler == 'interp-ac':
        args_test += ['--switch-interpreter', '--verify-soft-fail']
      elif compiler == 'jit':
        args_test += ['--jit']
      elif compiler == 'jit-on-first-use':
        args_test += ['--jit', '--runtime-option', '-Xjitthreshold:0']
      elif compiler == 'speed-profile':
        args_test += ['--random-profile']
      elif compiler == 'baseline':
        args_test += ['--baseline']

      if relocate == 'relocate':
        args_test += ['--relocate']
      elif relocate == 'no-relocate':
        args_test += ['--no-relocate']

      if trace == 'trace':
        args_test += ['--trace']
      elif trace == 'stream':
        args_test += ['--trace', '--stream']

      if gc == 'gcverify':
        args_test += ['--gcverify']
      elif gc == 'gcstress':
        args_test += ['--gcstress']

      if jni == 'forcecopy':
        args_test += ['--runtime-option', '-Xjniopts:forcecopy']
      elif jni == 'checkjni':
        args_test += ['--runtime-option', '-Xcheck:jni']

      if image == 'no-image':
        args_test += ['--no-image']

      if debuggable == 'debuggable':
        args_test += ['--debuggable', '--runtime-option', '-Xopaque-jni-ids:true']

      if jvmti == 'jvmti-stress':
        args_test += ['--jvmti-trace-stress', '--jvmti-redefine-stress', '--jvmti-field-stress']
      elif jvmti == 'field-stress':
        args_test += ['--jvmti-field-stress']
      elif jvmti == 'trace-stress':
        args_test += ['--jvmti-trace-stress']
      elif jvmti == 'redefine-stress':
        args_test += ['--jvmti-redefine-stress']
      elif jvmti == 'step-stress':
        args_test += ['--jvmti-step-stress']

      if address_size == '64':
        args_test += ['--64']

      # Run the run-test script using the prebuilt python.
      python3_bin = env.ANDROID_BUILD_TOP + "/prebuilts/build-tools/path/linux-x86/python3"
      run_test_sh = str(Path(__file__).parent.parent / 'run-test')
      if not os.path.exists(python3_bin):
        python3_bin = sys.executable  # Fallback to current python if we are in a sandbox.
      args_test = [python3_bin, run_test_sh] + args_test + extra_arguments[target] + [test]
      return executor.submit(run_test, args_test, test, variant_set, test_name)

  global n_thread
  with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
    test_futures = []
    for config_tuple in config:
      target = config_tuple[1]
      for address_size in _user_input_variants['address_sizes_target'][target]:
        test_futures.append(start_combination(executor, config_tuple, args_all, address_size))

    for config_tuple in uncombinated_config:
      test_futures.append(
          start_combination(executor, config_tuple, args_all, ""))  # no address size

    try:
      tests_done = 0
      # Print results in completion order, not submission order.
      for test_future in concurrent.futures.as_completed(f for f in test_futures if f):
        (test, status, failure_info, test_time) = test_future.result()
        tests_done += 1
        print_test_info(tests_done, test, status, failure_info, test_time)
        if failure_info and not env.ART_TEST_KEEP_GOING:
          # First failure aborts the run: cancel everything not yet started.
          for f in test_futures:
            f.cancel()
          break
    except KeyboardInterrupt:
      for f in test_futures:
        f.cancel()
      child_process_tracker.kill_all()
    # Wait for the already-running tests to wind down before returning.
    executor.shutdown(True)
609
610def _popen(**kwargs):
611  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
612    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
613  return subprocess.Popen(**kwargs)
614
def run_test(args, test, test_variant, test_name):
  """Runs the test.

  It invokes art/test/run-test script to run the test. The output of the script
  is checked, and if it ends with "Succeeded!", it assumes that the tests
  passed, otherwise, put it in the list of failed test. Before actually running
  the test, it also checks if the test is placed in the list of disabled tests,
  and if yes, it skips running it, and adds the test in the list of skipped
  tests.

  Args:
    args: The command to be used to invoke the script
    test: The name of the test without the variant information.
    test_variant: The set of variant for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of testname, status, optional failure info, and test time.
  """
  try:
    command = ' '.join(args)

    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      environ = dict(os.environ)
      environ["FULL_TEST_NAME"] = test_name
      if gdb or gdb_dex2oat:
        # Interactive debugging: inherit stdout so gdb can use the terminal.
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        # start_new_session=True puts the child in its own process group so
        # os.killpg / child_process_tracker can kill it and its children.
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      # Register with the tracker so Ctrl-C can kill it; may raise
      # subprocess.TimeoutExpired (handled below).
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      # Dry run: report disabled tests as passing instead of skipped.
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name and not env.ART_TEST_ON_VM:
      for i in range(8):
        # The last two characters of test_name are the address size ('32'/'64'),
        # giving the dalvikvm32/dalvikvm64 process name to look for.
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            # After a few passive rounds, actively dump a native backtrace.
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          # Dump the kernel-side state of every thread of the stuck process.
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())
713
@lru_cache
def get_console_width(default=100):
  """Return the terminal width in columns, or `default` when unknown.

  Cached, so the terminal is queried at most once per distinct `default`.
  The query can fail in several ways that previously escaped as exceptions:
  `stty` may be missing entirely (FileNotFoundError in a minimal sandbox),
  may exit non-zero (e.g. under 'nohup' with no controlling terminal), or
  may print unexpected output. All of those now fall back to `default`.
  """
  try:
    proc = subprocess.run(['stty', 'size'], capture_output=True)
    if proc.returncode == 0:
      # Output is "<rows> <cols>"; we want the column count.
      return int(proc.stdout.decode("utf8").split()[1])
  except (OSError, IndexError, ValueError):
    pass
  return default
719
def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps on erasing test
  information by overriding it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and
  command used to invoke the script. It doesn't override the failing
  test information in either of the cases.

  Args:
    test_count: How many tests have completed so far (for the progress %).
    test_name: Full variant-qualified test name.
    result: One of 'PASS', 'FAIL', 'SKIP', 'TIMEOUT'.
    failed_test_info: Command + output of the failing run, if any.
    test_time: Wall-clock duration of the test (shown only with --verbose).
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    info = '\r' + ' ' * get_console_width() + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      # Failures always end with '\n' so they are never overwritten by the
      # next test's status line.
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        # Truncate the test name from the left ('...suffix') so the whole
        # status line fits on one console row and can be overwritten.
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = get_console_width() - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    # Printing must never abort the run; record the test as failed instead.
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)
794
def verify_knownfailure_entry(entry):
  """Validate a single entry of knownfailures.json.

  Checks that every field name is known and that each value has one of the
  accepted types for that field.

  Args:
    entry: One dict parsed from knownfailures.json.

  Raises:
    ValueError: if the entry contains an unknown field or a field whose
      value has an unsupported type.
  """
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
  }
  for field in entry:
    accepted_types = supported_field.get(field)
    if accepted_types is None:
      # Previously an unknown field crashed with a bare KeyError here;
      # report it with a descriptive error like the type check below.
      raise ValueError('%s is not a supported field\n%s' % (
          field,
          str(entry)))
    field_type = type(entry[field])
    if field_type not in accepted_types:
      raise ValueError('%s is not supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))
812
def get_disabled_test_info(device_name):
  """Generate set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Args:
    device_name: Name of the connected device, or None when not running on
      a target device; matched against each entry's "devices" list.

  Returns:
    The method returns a dict of tests mapped to the variants list
    for which the test should not be run.
  """
  known_failures_file = Path(__file__).parent.parent / 'knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    # 'tests' may be a single name or a list of names.
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if (not isinstance(patterns, list)):
      raise ValueError("test_patterns is not a list in %s" % failure)

    # Expand regex patterns against the full run-test set.
    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' equivalent to 'target' variant if
    # "foo" is present in "devices".
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    # Only disable the tests if the required environment variables match.
    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          if env.ART_TEST_RUN_FROM_SOONG:
            continue  # Soong can see only sub-set of the tests within the shard.
          raise ValueError('%s is not a valid run-test' % (
              test))
        # Merge variant sets when a test appears in multiple entries.
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

  return disabled_test_info
867
def gather_disabled_test_info():
  """Populate DISABLED_TEST_CONTAINER from knownfailures.json.

  The device name is only queried when tests will actually run on a target
  device; otherwise device-specific entries are skipped.
  """
  global DISABLED_TEST_CONTAINER
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
  else:
    device_name = None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)
872
def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Args:
    env_vars: dict mapping variable names to required values, or a falsy
      value when the entry has no environment requirements.
  Returns:
    True if all the env variables are set as required, otherwise False.
  """
  if not env_vars:
    return True
  return all(env.get_env(name) == required
             for name, required in env_vars.items())
886
887
def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  # Dry runs never execute anything, so every test counts as disabled.
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  # --no-skips overrides the knownfailures-based disabling below.
  if ignore_skips:
    return False
  # The test is disabled when any recorded variant combination is fully
  # contained in the requested variant set.
  for disabled_combo in DISABLED_TEST_CONTAINER.get(test, {}):
    if all(variant in variant_set for variant in disabled_combo):
      return True
  # Some variant combinations are nonfunctional regardless of the test.
  return any(bad_combo.issubset(variant_set)
             for bad_combo in NONFUNCTIONAL_VARIANT_SETS)
916
917
def parse_variants(variants):
  """Parse a variant expression from art/test/knownfailures.json.

  The expression is an OR ('|') of AND-groups ('&') of variant names.

  Args:
    variants: the expression string, or a falsy value meaning "any variant".
  Returns:
    A set of frozensets; each frozenset is one AND-group of variant names.
  Raises:
    ValueError: if a token is not a known variant.
  """
  if not variants:
    # No restriction: treat it as an OR over every known variant.
    variants = '|'.join(TOTAL_VARIANTS_SET)
  parsed = set()
  for alternative in variants.split('|'):
    conjunction = set()
    for token in alternative.split('&'):
      token = token.strip()
      if token not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            token))
      conjunction.add(token)
    parsed.add(frozenset(conjunction))
  return parsed
940
def print_text(output, error=False):
  """Write output to stdout immediately.

  Non-error text is suppressed when running from a Soong build to keep the
  build log quiet; error text is always emitted.
  """
  if error or not env.ART_TEST_RUN_FROM_SOONG:
    sys.stdout.write(output)
    sys.stdout.flush()
946
def print_analysis():
  """Print the run summary: pass ratio, skipped tests, and failures."""
  if not verbose:
    # Non-verbose runs overwrite passing-test lines in place; blank out the
    # current console line before printing the summary.
    print_text('\r' + ' ' * get_console_width() + '\r')

  # Overall pass ratio, e.g. "2/38 (5%) tests passed".
  passed_count = total_test_count - len(skipped_tests) - len(failed_tests)
  summary = ('%d/%d (%d%%) %s passed.\n') % (
      passed_count,
      total_test_count,
      (passed_count*100)/total_test_count,
      'tests' if passed_count > 1 else 'test')
  print_text(summary)

  # List skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for skipped in skipped_tests:
      print_text(skipped + '\n')
    print_text('\n')

  # List failures with their output, then a sorted recap of the names.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n', error=True)
    for entry in failed_tests:
      print_text(('%s\n%s\n' % (entry[0], entry[1])), error=True)
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for name in sorted(entry[0] for entry in failed_tests):
      print_text(('%s\n' % (name)))
980
test_name_matcher = None
def extract_test_name(test_name):
  """Split a fully-qualified test name into its variant parts.

  Returns a 13-element list: target, run, prebuild, compiler, relocate,
  trace, gc, jni, image, debuggable, jvmti, test name, address size.
  Raises ValueError if the name does not match the expected shape.
  """
  global test_name_matcher
  if test_name_matcher is None:
    # Lazily build and cache the matcher on first use.
    def group(alternatives):
      # One capturing group matching any of the given alternatives.
      return '(' + '|'.join(alternatives) + ')'
    middle_types = ['run', 'prebuild', 'compiler', 'relocate', 'trace',
                    'gc', 'jni', 'image', 'debuggable', 'jvmti']
    regex = ('^test-art-'
             + group(VARIANT_TYPE_DICT['target']) + '-run-test-'
             + ''.join(group(VARIANT_TYPE_DICT[t]) + '-' for t in middle_types)
             + group(RUN_TEST_SET)
             + group(VARIANT_TYPE_DICT['address_sizes']) + '$')
    test_name_matcher = re.compile(regex)
  match = test_name_matcher.match(test_name)
  if match is None:
    raise ValueError(test_name + " is not a valid test")
  return list(match.groups())
1006
def parse_test_name(test_name):
  """Parses the testname provided by the user.

  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
  exists and if it does, it returns the testname.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-001-HelloWorld32
  In this case, it will parse all the variants and check if they are placed
  correctly. If yes, it will set the various VARIANT_TYPES to use the
  variants required to run the test. Again, it returns the test_name
  without the variant information like 001-HelloWorld.
  """
  # A plain (prefix of a) test name: return every matching run-test.
  matches = {candidate for candidate in RUN_TEST_SET
             if candidate.startswith(test_name)}
  if matches:
    return matches

  # Otherwise it must be a fully-qualified name; record its variants.
  parts = extract_test_name(test_name)
  ordered_keys = ['target', 'run', 'prebuild', 'compiler', 'relocate',
                  'trace', 'gc', 'jni', 'image', 'debuggable', 'jvmti']
  for key, value in zip(ordered_keys, parts):
    _user_input_variants[key].add(value)
  _user_input_variants['address_sizes'].add(parts[12])
  # parts[11] is the bare test name.
  return {parts[11]}
1039
1040
def get_target_cpu_count():
  """Return the number of CPUs on the target device (or test VM).

  Reads /sys/devices/system/cpu/present remotely, which reports a range
  like "0-7"; the count is the last index plus one.
  Raises ValueError when the output cannot be parsed (e.g. no device).
  """
  if env.ART_TEST_ON_VM:
    command = f"{env.ART_SSH_CMD} cat /sys/devices/system/cpu/present"
  else:
    command = 'adb shell cat /sys/devices/system/cpu/present'
  proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
  raw = proc.stdout.read()
  present = raw.decode('utf-8') if isinstance(raw, bytes) else raw
  match = re.match(r'\d*-(\d*)', present)
  if not match:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is device connected?')
  return int(match.group(1)) + 1  # Add one to convert from "last-index" to "count"
1057
1058
def get_host_cpu_count():
  """Return the number of CPUs available on the host machine."""
  return multiprocessing.cpu_count()
1061
1062
def parse_option():
  """Parse command-line arguments into the module-level configuration globals.

  Builds the argparse parser (global options plus one option group per
  variant type), then copies the parsed values into the globals declared
  below, in order.

  Returns:
    The set of test names selected on the command line, or RUN_TEST_SET
    when no tests were specified.
  """
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global dump_cfg
  global gdb_dex2oat
  global gdb_dex2oat_args
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('tests', action='extend', nargs="*", help='name(s) of the test(s)')
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)'
      ' (deprecated: use positional arguments at the end without any option instead)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to half of CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--dump-cfg', dest='dump_cfg',
                            help="""Dump the CFG to the specified host path.
                            Example \"--dump-cfg <full-path>/graph.cfg\".""")
  global_group.add_argument('--gdb-dex2oat', action='store_true', dest='gdb_dex2oat')
  global_group.add_argument('--gdb-dex2oat-args', dest='gdb_dex2oat_args')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\".""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  # One option group per variant type: an --all-<type> meta-flag plus one
  # boolean flag per variant.
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  # Record any explicitly-selected variants.
  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  # Debugging implies single-threaded execution so the debugger owns the
  # console.
  if options['gdb']:
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  if options['dump_cfg']:
    dump_cfg = options['dump_cfg']
  if options['gdb_dex2oat']:
    n_thread = 1
    gdb_dex2oat = True
    if options['gdb_dex2oat_args']:
      gdb_dex2oat_args = options['gdb_dex2oat_args']
  runtime_option = options['runtime_option'];
  with_agent = options['with_agent'];
  # Each --run-test-option value is shell-split, then all are concatenated.
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests or RUN_TEST_SET
1205
def main():
  """Entry point: parse options, optionally build dependencies, run tests.

  Exits the process with status 1 when the dependency build fails or any
  test fails, 0 otherwise.
  """
  gather_test_info()
  tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = []
    # Build only the needed shards (depending on the selected tests).
    # Raw string so '\d' reaches the regex engine as a digit class instead
    # of being an invalid string escape (SyntaxWarning on Python 3.12+).
    shards = set(re.search(r"(\d\d)-", t).group(1) for t in tests)
    if any("hiddenapi" in t for t in tests):
      shards.add("HiddenApi")  # Include special HiddenApi shard.
    for mode in ['host', 'target', 'jvm']:
      if mode in _user_input_variants['target']:
        build_targets += ['test-art-{}-run-test-dependencies'.format(mode)]
        if len(shards) >= 100:
          build_targets += ["art-run-test-{}-data".format(mode)]  # Build all.
        else:
          build_targets += ["art-run-test-{}-data-shard{}".format(mode, s) for s in shards]
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' D8='
    if dist:
      build_command += ' dist'
    build_command += ' ' + ' '.join(build_targets)
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  run_tests(tests)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)
1243
# Script entry point: only run the test runner when invoked directly.
if __name__ == '__main__':
  main()
1246