# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=import-outside-toplevel

"""Result Reporter

The result reporter formats and prints test results.

----
Example output for a command that runs the following tests:
CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests

Running Tests ...

CtsAnimationTestCases
---------------------

android.animation.cts.EvaluatorTest.UnitTests (7 Tests)
[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms)
[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms)
[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms)
[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms)
[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms)
[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms)
[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms)

HelloWorldTests
---------------

android.test.example.helloworld.UnitTests(2 Tests)
[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms)
[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED
(1ms)

WmTests
-------

com.android.tradefed.targetprep.UnitTests (1 Test)
RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError:
Failed to install WmTests.apk on 127.0.0.1:54373. Reason:
    error message ...


Summary
-------
CtsAnimationTestCases: Passed: 7, Failed: 0
HelloWorldTests: Passed: 2, Failed: 0
WmTests: Passed: 0, Failed: 0 (Completed With ERRORS)

1 test failed
"""

from __future__ import print_function

from collections import OrderedDict
import logging
import os
import pathlib
import re
import zipfile

from atest import atest_configs
from atest import atest_utils as au
from atest import constants
from atest.atest_enum import ExitCode
from atest.test_runners import test_runner_base

UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
FAILURE_FLAG = 'RUNNER_FAILURE'
BENCHMARK_ESSENTIAL_KEYS = {
    'repetition_index',
    'cpu_time',
    'name',
    'repetitions',
    'run_type',
    'threads',
    'time_unit',
    'iterations',
    'run_name',
    'real_time',
}
# TODO(b/146875480): handle the optional benchmark events
BENCHMARK_OPTIONAL_KEYS = {'bytes_per_second', 'label'}
BENCHMARK_EVENT_KEYS = BENCHMARK_ESSENTIAL_KEYS.union(BENCHMARK_OPTIONAL_KEYS)
INT_KEYS = {}
ITER_SUMMARY = {}
ITER_COUNTS = {}

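# Shapes of the iteration bookkeeping globals above, as populated by
# ResultReporter (illustrative values only):
#   ITER_SUMMARY: {iteration number (int): [summary line (str), ...]}
#       e.g. {0: ['HelloWorldTests: Passed: 2, Failed: 0, ...']}
#   ITER_COUNTS: {group or runner name (str): accumulated counts (dict)}
#       e.g. {'HelloWorldTests': {'passed': 2, 'failed': 0, 'ignored': 0,
#                                 'assumption_failed': 0}}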

class PerfInfo:
  """Class for storing performance info of a test run."""

  def __init__(self):
    """Initialize a new instance of PerfInfo class."""
    # perf_info: A list of benchmark_info(dict).
    self.perf_info = []

  def update_perf_info(self, test):
    """Update perf_info with the given result of a single test.

    Args:
        test: A TestResult namedtuple.
    """
    all_additional_keys = set(test.additional_info.keys())
    # Skip this result unless every essential benchmark key is present.
    if not BENCHMARK_ESSENTIAL_KEYS.issubset(all_additional_keys):
      return
    benchmark_info = {}
    benchmark_info['test_name'] = test.test_name
    for key, data in test.additional_info.items():
      if key in INT_KEYS:
        data_to_int = data.split('.')[0]
        benchmark_info[key] = data_to_int
      elif key in BENCHMARK_EVENT_KEYS:
        benchmark_info[key] = data
    if benchmark_info:
      self.perf_info.append(benchmark_info)

  def print_perf_info(self):
    """Print summary of a perf_info."""
    if not self.perf_info:
      return
    classify_perf_info, max_len = self._classify_perf_info()
    separator = '-' * au.get_terminal_size()[0]
    print(separator)
    print(
        '{:{name}}    {:^{real_time}}    {:^{cpu_time}}    '
        '{:>{iterations}}'.format(
            'Benchmark',
            'Time',
            'CPU',
            'Iteration',
            name=max_len['name'] + 3,
            real_time=max_len['real_time'] + max_len['time_unit'] + 1,
            cpu_time=max_len['cpu_time'] + max_len['time_unit'] + 1,
            iterations=max_len['iterations'],
        )
    )
    print(separator)
    for module_name, module_perf_info in classify_perf_info.items():
      print('{}:'.format(module_name))
      for benchmark_info in module_perf_info:
        # BpfBenchMark/MapWriteNewEntry/1    1530 ns     1522 ns   460517
        print(
            '  #{:{name}}    {:>{real_time}} {:{time_unit}}    '
            '{:>{cpu_time}} {:{time_unit}}    '
            '{:>{iterations}}'.format(
                benchmark_info['name'],
                benchmark_info['real_time'],
                benchmark_info['time_unit'],
                benchmark_info['cpu_time'],
                benchmark_info['time_unit'],
                benchmark_info['iterations'],
                name=max_len['name'],
                real_time=max_len['real_time'],
                time_unit=max_len['time_unit'],
                cpu_time=max_len['cpu_time'],
                iterations=max_len['iterations'],
            )
        )

  def _classify_perf_info(self):
    """Classify the perf_info by test module name.

    Returns:
        A tuple of (classified_perf_info, max_len), where
        classified_perf_info: A dict that groups each perf_info by the
                              module it belongs to.
            e.g.
                { module_name_01: [perf_info of module_1],
                  module_name_02: [perf_info of module_2], ...}
        max_len: A dict which stores the max length of each event.
                 It contains the max string length of 'name', 'real_time',
                 'time_unit', 'cpu_time', 'iterations'.
            e.g.
                {name: 56, real_time: 9, time_unit: 2, cpu_time: 8,
                 iterations: 12}
    """
    module_categories = set()
    max_len = {}
    all_name = []
    all_real_time = []
    all_time_unit = []
    all_cpu_time = []
    all_iterations = ['Iteration']
    for benchmark_info in self.perf_info:
      module_categories.add(benchmark_info['test_name'].split('#')[0])
      all_name.append(benchmark_info['name'])
      all_real_time.append(benchmark_info['real_time'])
      all_time_unit.append(benchmark_info['time_unit'])
      all_cpu_time.append(benchmark_info['cpu_time'])
      all_iterations.append(benchmark_info['iterations'])
    classified_perf_info = {}
    for module_name in module_categories:
      module_perf_info = []
      for benchmark_info in self.perf_info:
        if benchmark_info['test_name'].split('#')[0] == module_name:
          module_perf_info.append(benchmark_info)
      classified_perf_info[module_name] = module_perf_info
    max_len = {
        'name': len(max(all_name, key=len)),
        'real_time': len(max(all_real_time, key=len)),
        'time_unit': len(max(all_time_unit, key=len)),
        'cpu_time': len(max(all_cpu_time, key=len)),
        'iterations': len(max(all_iterations, key=len)),
    }
    return classified_perf_info, max_len


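# Illustrative sketch only (not called by atest): how a runner-produced
# benchmark result flows into PerfInfo. FakeResult below is a hypothetical
# stand-in that carries just the two attributes update_perf_info() reads;
# real runners pass the TestResult namedtuple from test_runner_base.
def _example_perf_info_usage():
  import collections

  FakeResult = collections.namedtuple(
      'FakeResult', ['test_name', 'additional_info']
  )
  perf_info = PerfInfo()
  perf_info.update_perf_info(
      FakeResult(
          test_name='BpfBenchMark#MapWriteNewEntry',
          additional_info={
              'name': 'BpfBenchMark/MapWriteNewEntry/1',
              'real_time': '1530',
              'cpu_time': '1522',
              'time_unit': 'ns',
              'iterations': '460517',
              'run_name': 'BpfBenchMark/MapWriteNewEntry/1',
              'run_type': 'iteration',
              'repetitions': '0',
              'repetition_index': '0',
              'threads': '1',
          },
      )
  )
  # Every essential benchmark key was present, so one row was recorded and
  # print_perf_info() renders it as an aligned table.
  assert len(perf_info.perf_info) == 1
  perf_info.print_perf_info()
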
class RunStat:
  """Class for storing stats of a test run."""

  def __init__(
      self, passed=0, failed=0, ignored=0, run_errors=False, assumption_failed=0
  ):
    """Initialize a new instance of RunStat class.

    Args:
        passed: Count of passing tests.
        failed: Count of failed tests.
        ignored: Count of ignored tests.
        assumption_failed: Count of assumption failure tests.
        run_errors: A boolean of whether there were run errors.
    """
    # TODO(b/109822985): Track group and run estimated totals for updating
    # summary line
    self.passed = passed
    self.failed = failed
    self.ignored = ignored
    self.assumption_failed = assumption_failed
    self.perf_info = PerfInfo()
    # Run errors are not for particular tests, they are runner errors.
    self.run_errors = run_errors

  @property
  def total(self):
    """Getter for total tests actually run. Accessed via self.total"""
    return self.passed + self.failed


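# Illustrative sketch only (not called by atest): RunStat.total counts tests
# that reached a pass/fail verdict; ignored tests and assumption failures are
# tracked separately and do not contribute to the total.
def _example_run_stat_usage():
  stats = RunStat(passed=7, failed=2, ignored=1, assumption_failed=1)
  assert stats.total == 9
  assert not stats.run_errors
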
class ResultReporter:
  """Result Reporter class.

  As each test is run, the test runner will call self.process_test_result()
  with a TestResult namedtuple that contains the following information:
  - runner_name:   Name of the test runner
  - group_name:    Name of the test group if any.
                   In Tradefed that's the Module name.
  - test_name:     Name of the test.
                   In Tradefed that's qualified.class#Method
  - status:        The strings FAILED or PASSED.
  - stacktrace:    The stacktrace if the test failed.
  - group_total:   The total tests scheduled to be run for a group.
                   In Tradefed this is provided when the Module starts.
  - runner_total:  The total tests scheduled to be run for the runner.
                   In Tradefed this is not available so is None.

  The Result Reporter will print the results of this test and then update
  its stats state.

  Test stats are stored in the following structure:
  - self.run_stats: A RunStat instance containing stats for the overall run.
                    This includes pass/fail counts across ALL test runners.

  - self.runners:  Is of the form: {RunnerName: {GroupName: RunStat Instance}}
                   Where {} is an ordered dict.

                   The stats instance contains stats for each test group.
                   If the runner doesn't support groups, then the group
                   name will be None.

  For example, this could be a state of ResultReporter:

  run_stats: RunStat(passed:10, failed:5)
  runners: {'AtestTradefedTestRunner':
                          {'Module1': RunStat(passed:1, failed:1),
                           'Module2': RunStat(passed:0, failed:4)},
            'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)},
            'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}}
  """

  def __init__(
      self,
      silent=False,
      collect_only=False,
      wait_for_debugger=False,
      args=None,
      test_infos=None,
  ):
    """Init ResultReporter.

    Args:
        silent: A boolean of whether to suppress printing test results.
        collect_only: A boolean of whether this invocation only collects
            tests instead of running them.
        wait_for_debugger: A boolean of whether the test run waits for a
            debugger to be attached.
        args: An argparse.Namespace of atest command-line arguments.
        test_infos: A list of TestInfos for the tests being run.
    """
    self.run_stats = RunStat()
    self.runners = OrderedDict()
    self.failed_tests = []
    self.all_test_results = []
    self.pre_test = None
    self.log_path = None
    self.silent = silent
    self.rerun_options = ''
    self.collect_only = collect_only
    self.test_result_link = None
    self.device_count = 0
    self.wait_for_debugger = wait_for_debugger
    self._args = args
    self._test_infos = test_infos or []

  def get_test_results_by_runner(self, runner_name):
    """Return the recorded test results that belong to the given runner."""
    return [t for t in self.all_test_results if t.runner_name == runner_name]

  def process_test_result(self, test):
    """Given the results of a single test, update stats and print results.

    Args:
        test: A TestResult namedtuple.
    """
    if test.runner_name not in self.runners:
      self.runners[test.runner_name] = OrderedDict()
    assert self.runners[test.runner_name] != FAILURE_FLAG
    self.all_test_results.append(test)
    if test.group_name not in self.runners[test.runner_name]:
      self.runners[test.runner_name][test.group_name] = RunStat()
      self._print_group_title(test)
    self._update_stats(test, self.runners[test.runner_name][test.group_name])
    self._print_result(test)

  def runner_failure(self, runner_name, failure_msg):
    """Report a runner failure.

    Use instead of process_test_result() when the runner fails separately
    from any particular test, e.g. during setup of the runner.

    Args:
        runner_name: A string of the name of the runner.
        failure_msg: A string of the failure message to pass to the user.
    """
    self.runners[runner_name] = FAILURE_FLAG

    print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
    print(
        au.mark_red(
            'Runner encountered a critical failure. Skipping.\nFAILURE: %s'
            % failure_msg
        )
    )

  def register_unsupported_runner(self, runner_name):
    """Register an unsupported runner.

    Prints the following to the screen:

    RunnerName
    ----------
    This runner does not support normal results formatting.
    Below is the raw output of the test runner.

    RAW OUTPUT:
    <Raw Runner Output>

    Args:
        runner_name: A string of the test runner's name.
    """
    assert runner_name not in self.runners
    self.runners[runner_name] = UNSUPPORTED_FLAG
    print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
    print(
        'This runner does not support normal results formatting. Below '
        'is the raw output of the test runner.\n\nRAW OUTPUT:'
    )

  def print_starting_text(self):
    """Print starting text for running tests."""
    if self.wait_for_debugger:
      print(
          au.mark_red(
              '\nDebugging Tests [you may need to attach a debugger for the'
              ' process to continue...]'
          )
      )
    else:
      print(au.mark_cyan('\nRunning Tests...'))

  def set_current_iteration_summary(self, iteration_num: int) -> None:
    """Add the given iteration's current summary to the list of its existing summaries."""
    run_summary = []
    for runner_name, groups in self.runners.items():
      for group_name, stats in groups.items():
        name = group_name if group_name else runner_name
        test_run_name = (
            self.all_test_results[-1].test_run_name
            if self.all_test_results[-1].test_run_name != name
            else None
        )
        summary = self.process_summary(name, stats, test_run_name=test_run_name)
        run_summary.append(summary)
    summary_list = ITER_SUMMARY.get(iteration_num, [])
    summary_list.extend(run_summary)
    ITER_SUMMARY[iteration_num] = summary_list

  def get_iterations_summary(self) -> str:
    """Return the combined summary of all the iterations."""
    total_summary = ''
    for key, value in ITER_COUNTS.items():
      total_summary += '%s: %s: %s, %s: %s, %s: %s, %s: %s\n' % (
          key,
          'Passed',
          value.get('passed', 0),
          'Failed',
          value.get('failed', 0),
          'Ignored',
          value.get('ignored', 0),
          'Assumption_failed',
          value.get('assumption_failed', 0),
      )
    return f"{au.delimiter('-', 7)}\nITERATIONS RESULT\n{total_summary}"

  # pylint: disable=too-many-branches
  def print_summary(self):
    """Print summary of all test runs.

    Returns:
        0 if all tests pass, non-zero otherwise.
    """
    if self.collect_only:
      return self.print_collect_tests()
    tests_ret = ExitCode.SUCCESS
    if not self.runners:
      return tests_ret
    if not self.device_count:
      device_detail = ''
    elif self.device_count == 1:
      device_detail = '(Test executed with 1 device.)'
    else:
      device_detail = f'(Test executed with {self.device_count} devices.)'
    print('\n{}'.format(au.mark_cyan(f'Summary {device_detail}')))
    print(au.delimiter('-', 7))

    multi_iterations = len(ITER_SUMMARY) > 1
    for iter_num, summary_list in ITER_SUMMARY.items():
      if multi_iterations:
        print(au.mark_blue('ITERATION %s' % (int(iter_num) + 1)))
      for summary in summary_list:
        print(summary)
    if multi_iterations:
      print(self.get_iterations_summary())

    failed_sum = len(self.failed_tests)
    for runner_name, groups in self.runners.items():
      if groups == UNSUPPORTED_FLAG:
        print(
            f'Pretty output does not support {runner_name}. '
            r'See raw output above.'
        )
        continue
      if groups == FAILURE_FLAG:
        tests_ret = ExitCode.TEST_FAILURE
        print(runner_name, 'Crashed. No results to report.')
        failed_sum += 1
        continue
      for group_name, stats in groups.items():
        name = group_name if group_name else runner_name
        summary = self.process_summary(name, stats)
        if stats.failed > 0 or stats.run_errors:
          tests_ret = ExitCode.TEST_FAILURE
          if stats.run_errors:
            failed_sum += 1 if not stats.failed else 0
        if not ITER_SUMMARY:
          print(summary)

    self.run_stats.perf_info.print_perf_info()
    print()
    if UNSUPPORTED_FLAG not in self.runners.values():
      if tests_ret == ExitCode.SUCCESS:
        print(au.mark_green('All tests passed!'))
      else:
        message = '%d %s failed' % (
            failed_sum,
            'tests' if failed_sum > 1 else 'test',
        )
        print(au.mark_red(message))
        print('-' * len(message))
        self.print_failed_tests()

    self._print_perf_test_metrics()
    # TODO(b/174535786) Error handling while uploading test results has
    # unexpected exceptions.
    # TODO (b/174627499) Saving this information in atest history.
    if self.test_result_link:
      print('Test Result uploaded to %s' % au.mark_green(self.test_result_link))
    return tests_ret

  def _print_perf_test_metrics(self) -> bool:
    """Print perf test metrics text content to console.

    Returns:
        True if metric printing is attempted; False if not perf tests.
    """
    if not any(
        'performance-tests' in info.compatibility_suites
        for info in self._test_infos
    ):
      return False

    if not self.log_path:
      return True

    aggregated_metric_files = au.find_files(
        self.log_path, file_name='*_aggregate_test_metrics_*.txt'
    )

    if self._args.perf_itr_metrics:
      individual_metric_files = au.find_files(
          self.log_path, file_name='test_results_*.txt'
      )
      print('\n{}'.format(au.mark_cyan('Individual test metrics')))
      print(au.delimiter('-', 7))
      for metric_file in individual_metric_files:
        metric_file_path = pathlib.Path(metric_file)
        # Skip aggregate metrics as we are printing individual metrics here.
        if '_aggregate_test_metrics_' in metric_file_path.name:
          continue
        print('{}:'.format(au.mark_cyan(metric_file_path.name)))
        print(
            ''.join(
                f'{" "*4}{line}'
                for line in metric_file_path.read_text(
                    encoding='utf-8'
                ).splitlines(keepends=True)
            )
        )

    print('\n{}'.format(au.mark_cyan('Aggregate test metrics')))
    print(au.delimiter('-', 7))
    for metric_file in aggregated_metric_files:
      self._print_test_metric(pathlib.Path(metric_file))

    return True

  def _print_test_metric(self, metric_file: pathlib.Path) -> None:
    """Print the content of the input metric file."""
    test_metrics_re = re.compile(
        r'test_results.*\s(.*)_aggregate_test_metrics_.*\.txt'
    )
    if not metric_file.is_file():
      return
    matches = re.findall(test_metrics_re, metric_file.as_posix())
    test_name = matches[0] if matches else ''
    if test_name:
      print('{}:'.format(au.mark_cyan(test_name)))
      with metric_file.open('r', encoding='utf-8') as f:
        matched = False
        filter_res = self._args.aggregate_metric_filter
        logging.debug('Aggregate metric filters: %s', filter_res)
        test_methods = []
        # Collect all test methods.
        if filter_res:
          test_re = re.compile(r'\n\n(\S+)\n\n', re.MULTILINE)
          test_methods = re.findall(test_re, f.read())
          f.seek(0)
          # The first line of the file is also a test method but cannot be
          # parsed by test_re; add the first line manually.
          first_line = f.readline()
          test_methods.insert(0, str(first_line).strip())
          f.seek(0)
        for line in f.readlines():
          stripped_line = str(line).strip()
          if filter_res:
            if stripped_line in test_methods:
              print()
              au.colorful_print(' ' * 4 + stripped_line, constants.MAGENTA)
            for filter_re in filter_res:
              if re.match(re.compile(filter_re), line):
                matched = True
                print(' ' * 4 + stripped_line)
          else:
            matched = True
            print(' ' * 4 + stripped_line)
        if not matched:
          au.colorful_print(
              '  Warning: Nothing returned by the pattern: {}'.format(
                  filter_res
              ),
              constants.RED,
          )
        print()

  def print_collect_tests(self):
    """Print summary of collect-tests-only runs.

    Returns:
        0 when test collection is done.
    """
    tests_ret = ExitCode.SUCCESS
    if not self.runners:
      return tests_ret
    print(f'\n{au.mark_cyan("Summary: "+ constants.COLLECT_TESTS_ONLY)}')
    print(au.delimiter('-', 26))
    for runner_name, groups in self.runners.items():
      for group_name, _ in groups.items():
        name = group_name if group_name else runner_name
        print(name)
    return ExitCode.SUCCESS

  def print_failed_tests(self):
    """Print the failed tests, if any."""
    if self.failed_tests:
      for test_name in self.failed_tests:
        print(test_name)

  def process_summary(self, name, stats, test_run_name=None):
    """Process the summary line.

    Strategy:
        Error status happens ->
            SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red>
            SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With
            ERRORS)</red>
        More than 1 test fails ->
            SomeTests: Passed: 2, <red>Failed</red>: 5
        No test fails ->
            SomeTests: <green>Passed</green>: 2, Failed: 0

    Args:
        name: A string of test name.
        stats: A RunStat instance for a test group.
        test_run_name: A string of test run name (optional)

    Returns:
        A summary of the test result.
    """
    passed_label = 'Passed'
    failed_label = 'Failed'
    ignored_label = 'Ignored'
    assumption_failed_label = 'Assumption Failed'
    error_label = ''
    host_log_content = ''
    if stats.failed > 0:
      failed_label = au.mark_red(failed_label)
    if stats.run_errors:
      error_label = au.mark_red('(Completed With ERRORS)')
      # Only extract host_log_content if the test runner is Tradefed.
      # Import here to prevent circular-import error.
      from atest.test_runners import atest_tf_test_runner

      if name == atest_tf_test_runner.AtestTradefedTestRunner.NAME:
        find_logs = au.find_files(
            self.log_path, file_name=constants.TF_HOST_LOG
        )
        if find_logs:
          host_log_content = au.mark_red('\n\nTradefederation host log:\n')
        for tf_log in find_logs:
          if zipfile.is_zipfile(tf_log):
            host_log_content = host_log_content + au.extract_zip_text(tf_log)
          else:
            with open(tf_log, 'r', encoding='utf-8') as f:
              for line in f.readlines():
                host_log_content = host_log_content + line

      # Print the content of the standard error file for a single module.
      if name and self.log_path and len(str(name).split()) > 1:
        log_name = str(name).split()[1] + '-stderr_*.txt'
        module_logs = au.find_files(self.log_path, file_name=log_name)
        for log_file in module_logs:
          print(
              ' ' * 2
              + au.mark_magenta(f'Logs in {os.path.basename(log_file)}:')
          )
          with open(log_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
              print(' ' * 2 + str(line), end='')
    elif stats.failed == 0:
      passed_label = au.mark_green(passed_label)
    temp = ITER_COUNTS.get(name, {})
    temp['passed'] = temp.get('passed', 0) + stats.passed
    temp['failed'] = temp.get('failed', 0) + stats.failed
    temp['ignored'] = temp.get('ignored', 0) + stats.ignored
    temp['assumption_failed'] = (
        temp.get('assumption_failed', 0) + stats.assumption_failed
    )
    ITER_COUNTS[name] = temp

    summary_name = f'{name}:{test_run_name}' if test_run_name else name
    summary = '%s: %s: %s, %s: %s, %s: %s, %s: %s %s %s' % (
        summary_name,
        passed_label,
        stats.passed,
        failed_label,
        stats.failed,
        ignored_label,
        stats.ignored,
        assumption_failed_label,
        stats.assumption_failed,
        error_label,
        host_log_content,
    )
    return summary

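  # For reference, a fully expanded summary line built above looks like
  # (colors omitted, illustrative counts):
  #   HelloWorldTests: Passed: 2, Failed: 0, Ignored: 0, Assumption Failed: 0
  # with '(Completed With ERRORS)' and any Tradefed host log content appended
  # when the group finished with runner errors.
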
  def _update_stats(self, test, group):
    """Given the results of a single test, update test run stats.

    Args:
        test: a TestResult namedtuple.
        group: a RunStat instance for a test group.
    """
    # TODO(109822985): Track group and run estimated totals for updating
    # summary line
    if test.status == test_runner_base.PASSED_STATUS:
      self.run_stats.passed += 1
      group.passed += 1
    elif test.status == test_runner_base.IGNORED_STATUS:
      self.run_stats.ignored += 1
      group.ignored += 1
    elif test.status == test_runner_base.ASSUMPTION_FAILED:
      self.run_stats.assumption_failed += 1
      group.assumption_failed += 1
    elif test.status == test_runner_base.FAILED_STATUS:
      self.run_stats.failed += 1
      self.failed_tests.append(test.test_name)
      group.failed += 1
    elif test.status == test_runner_base.ERROR_STATUS:
      self.run_stats.run_errors = True
      group.run_errors = True
    self.run_stats.perf_info.update_perf_info(test)

  def _print_group_title(self, test):
    """Print the title line for a test group.

    Test Group/Runner Name
    ----------------------

    Args:
        test: A TestResult namedtuple.
    """
    if self.silent:
      return
    title = test.group_name or test.runner_name
    underline = '-' * (len(title))
    print('\n%s\n%s' % (title, underline))

  # pylint: disable=too-many-branches
  def _print_result(self, test):
    """Print the results of a single test.

    Looks like:
        fully.qualified.class#TestMethod: PASSED/FAILED

    Args:
        test: a TestResult namedtuple.
    """
    if self.silent:
      return
    if not self.pre_test or (test.test_run_name != self.pre_test.test_run_name):
      print(
          '%s (%s %s)'
          % (
              au.mark_blue(test.test_run_name),
              test.group_total,
              'Test' if test.group_total == 1 else 'Tests',
          )
      )
    if test.status == test_runner_base.ERROR_STATUS:
      print('RUNNER ERROR: %s\n' % test.details)
      self.pre_test = test
      return
    if test.test_name:
      color = ''
      if test.status == test_runner_base.PASSED_STATUS:
        # Example of output:
        # [78/92] test_name: PASSED (92ms)
        color = constants.GREEN
      elif test.status in (
          test_runner_base.IGNORED_STATUS,
          test_runner_base.ASSUMPTION_FAILED,
      ):
        # Example: [33/92] test_name: IGNORED (12ms)
        # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms)
        color = constants.MAGENTA
      else:
        # Example: [26/92] test_name: FAILED (32ms)
        color = constants.RED
      print(
          '[{}/{}] {}'.format(
              test.test_count, test.group_total, test.test_name
          ),
          end='',
      )
      if self.collect_only:
        print()
      else:
        print(': {} {}'.format(au.colorize(test.status, color), test.test_time))
      if test.status == test_runner_base.PASSED_STATUS:
        for key, data in sorted(test.additional_info.items()):
          if key not in BENCHMARK_EVENT_KEYS:
            print(f'\t{au.mark_blue(key)}: {data}')
      if test.status == test_runner_base.FAILED_STATUS:
        print(f'\nSTACKTRACE:\n{test.details}')
    self.pre_test = test

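
# Illustrative sketch only (not called by atest): the minimal flow a test
# runner drives on a ResultReporter. FakeResult is a hypothetical stand-in
# that defines just the attributes this module reads; real runners pass the
# TestResult namedtuple declared in test_runner_base.
def _example_result_reporter_usage():
  import collections

  FakeResult = collections.namedtuple(
      'FakeResult',
      [
          'runner_name',
          'group_name',
          'test_run_name',
          'test_name',
          'status',
          'details',
          'test_count',
          'test_time',
          'group_total',
          'additional_info',
      ],
  )
  reporter = ResultReporter()
  reporter.process_test_result(
      FakeResult(
          runner_name='ExampleRunner',
          group_name='ExampleModule',
          test_run_name='ExampleModule',
          test_name='com.example.ExampleTest#testFoo',
          status=test_runner_base.PASSED_STATUS,
          details=None,
          test_count=1,
          test_time='(10ms)',
          group_total=1,
          additional_info={},
      )
  )
  # Prints the group title, the per-test line and the summary, then returns
  # ExitCode.SUCCESS because nothing failed.
  return reporter.print_summary()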