xref: /aosp_15_r20/tools/asuite/atest/atest_utils.py (revision c2e18aaa1096c836b086f94603d04f4eb9cf37f5)
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""Utility functions for atest."""
16
17
18# pylint: disable=import-outside-toplevel
19# pylint: disable=too-many-lines
20
21from __future__ import print_function
22
23from collections import deque
24from dataclasses import dataclass
25import datetime
26import enum
27import fnmatch
28import hashlib
29import html
30import importlib.resources
31import importlib.util
32import io
33import itertools
34import json
35import logging
36from multiprocessing import Process
37import os
38from pathlib import Path
39import pickle
40import platform
41import re
42import shutil
43import subprocess
44import sys
45import threading
46from threading import Thread
47import traceback
48from typing import Any, Dict, IO, List, Set, Tuple
49import urllib
50import xml.etree.ElementTree as ET
51import zipfile
52
53from atest import atest_decorator
54from atest import constants
55from atest.atest_enum import DetectType, ExitCode, FilterType
56from atest.metrics import metrics
57from atest.metrics import metrics_utils
58from atest.tf_proto import test_record_pb2
59
# Number of rolling output lines shown by default in stream_io_output.
DEFAULT_OUTPUT_ROLLING_LINES = 6
# ANSI sequences: move the cursor up one line and clear it / reset attributes.
_BASH_CLEAR_PREVIOUS_LINE_CODE = '\033[F\033[K'
_BASH_RESET_CODE = '\033[0m'
# Dist output directory under the source tree (falls back to CWD when
# ANDROID_BUILD_TOP is unset).
DIST_OUT_DIR = Path(
    os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd()) + '/out/dist/'
)
# Matches mainline-module artifact extensions at the end of a path.
MAINLINE_MODULES_EXT_RE = re.compile(r'\.(apex|apks|apk)$')
# Matches 'test_name[module1.apk,module2.apex]' style test references.
TEST_WITH_MAINLINE_MODULES_RE = re.compile(
    r'(?P<test>.*)\[(?P<mainline_modules>.*' r'[.](apk|apks|apex))\]$'
)

# Arbitrary number to limit stdout for failed runs in run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
_BUILD_FAILURE = 'FAILED: '
# MD5 of the source tree root path (hash of '' when ANDROID_BUILD_TOP unset).
BUILD_TOP_HASH = hashlib.md5(
    os.environ.get(constants.ANDROID_BUILD_TOP, '').encode()
).hexdigest()
# Fallback terminal geometry used when the real size cannot be queried.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell snippet listing files modified between HEAD and the remote branch.
_FIND_MODIFIED_FILES_CMDS = (
    'cd {};'
    'local_branch=$(git rev-parse --abbrev-ref HEAD);'
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    'ahead=$(git rev-list --left-right --count $local_branch...$remote_branch '
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    'git diff HEAD~$ahead --name-only'
)
_ANDROID_BUILD_EXT = ('.bp', '.mk')

# Set of special chars for various purposes.
_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
_WILDCARD_CHARS = {'?', '*'}

# Filters ending with a wildcard vs. ending with a regular word character.
_WILDCARD_FILTER_RE = re.compile(r'.*[?|*]$')
_REGULAR_FILTER_RE = re.compile(r'.*\w$')

SUGGESTIONS = {
    # (b/177626045) If Atest does not install target application properly.
    'Runner reported an invalid method': 'Please reflash the device(s).',
}

# Extra environment variables applied to build commands (see build()).
_BUILD_ENV = {}

# Version folder component of the atest cache path (see get_cache_root()).
CACHE_VERSION = 1

# Handle to the real stdout; stream_io_output temporarily replaces sys.stdout
# while rendering rolling output, so writers needing the true console use this.
_original_sys_stdout = sys.stdout
115
116
@dataclass
class BuildEnvProfiler:
  """Represents the condition before and after triggering a build."""

  # Path of the combined ninja file and its snapshot mtime.
  ninja_file: Path
  ninja_file_mtime: float
  # Path of the build variables file and its MD5 at snapshot time.
  variable_file: Path
  variable_file_md5: str
  # True when the build out dir was clean before the build — TODO confirm
  # against _build_env_profiling (not visible in this chunk).
  clean_out: bool
  # Result of the Android build-files integrity check at snapshot time.
  build_files_integrity: bool
128
@enum.unique
class BuildOutputMode(enum.Enum):
  """Represents the different ways to display build output."""

  STREAMED = 'streamed'
  LOGGED = 'logged'

  def __init__(self, arg_name: str):
    # Enum members are initialized with their assigned value; keep it as
    # the human-readable description returned by description().
    self._description = arg_name

  def description(self) -> str:
    """Return the human-readable description of this output mode."""
    return self._description
143
@dataclass
class AndroidVariables:
  """Class that stores the value of environment variables."""

  build_top: str
  product_out: str
  target_out_cases: str
  host_out: str
  host_out_cases: str
  target_product: str
  build_variant: str

  def __init__(self):
    """Capture the current Android build environment variables."""
    env_names = (
        'ANDROID_BUILD_TOP',
        'ANDROID_PRODUCT_OUT',
        'ANDROID_TARGET_OUT_TESTCASES',
        'ANDROID_HOST_OUT',
        'ANDROID_HOST_OUT_TESTCASES',
        'TARGET_PRODUCT',
        'TARGET_BUILD_VARIANT',
    )
    (
        self.build_top,
        self.product_out,
        self.target_out_cases,
        self.host_out,
        self.host_out_cases,
        self.target_product,
        self.build_variant,
    ) = map(os.getenv, env_names)
164
165
def get_build_top(*joinpaths: Any) -> Path:
  """Get the absolute path from the given repo path."""
  build_top = AndroidVariables().build_top
  return Path(build_top, *joinpaths)
169
170
def get_host_out(*joinpaths: Any) -> Path:
  """Get the absolute host out path from the given path."""
  host_out = AndroidVariables().host_out
  return Path(host_out, *joinpaths)
174
175
def get_product_out(*joinpaths: Any) -> Path:
  """Get the absolute product out path from the given path."""
  product_out = AndroidVariables().product_out
  return Path(product_out, *joinpaths)
179
180
def get_index_path(*filename: Any) -> Path:
  """Get absolute path of the desired index file."""
  # Index files live under <host_out>/indices.
  return get_host_out('indices', *filename)
184
185
def getenv_abs_path(env: str, suffix: str = None) -> Path:
  """Translate the environment variable to an absolute path.

  Args:
      env: string of the given environment variable.
      suffix: string that will be appended to.

  Returns:
      Absolute Path of the given environment variable, or None when it is
      unset or empty.
  """
  value = os.getenv(env)
  if not value:
    return None

  path = Path(value)
  if not path.is_absolute():
    # Relative values are resolved against the Android source tree root.
    path = get_build_top(path)
  return path.joinpath(suffix) if suffix else path
205
206
def get_build_cmd(dump=False):
  """Compose build command with no-absolute path and flag "--make-mode".

  Args:
      dump: boolean that determines the option of build/soong/soong_ui.bash.
            True: used to dump build variables, equivalent to printconfig. e.g.
              build/soong/soong_ui.bash --dumpvar-mode <VAR_NAME>
            False: (default) used to build targets in make mode. e.g.
              build/soong/soong_ui.bash --make-mode <MOD_NAME>

  Returns:
      A list of soong build command.
  """
  # Use a path relative to CWD so the logged command stays short/portable.
  make_cmd = '%s/%s' % (
      os.path.relpath(
          os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd()), os.getcwd()
      ),
      _BUILD_CMD,
  )
  if dump:
    return [make_cmd, '--dumpvar-mode', 'report_config']
  # Tag the build with the tool name and run id for traceability.
  return [
      make_cmd,
      '--make-mode',
      'WRAPPER_TOOL=atest',
      f'ATEST_RUN_ID={metrics.get_run_id()}',
  ]
234
235
def _capture_fail_section(full_log):
  """Return the error message from the build output.

  Args:
      full_log: List of strings representing full output of build.

  Returns:
      List of strings that are build errors.
  """
  captured = []
  capturing = False
  for line in full_log:
    if capturing:
      # A new compile-status line marks the end of the failure section.
      if _BUILD_COMPILE_STATUS.match(line):
        break
      captured.append(line)
    elif line.startswith(_BUILD_FAILURE):
      capturing = True
      captured.append(line)
  return captured
255
256
def _capture_limited_output(full_log):
  """Return the limited error message from _capture_fail_section.

  Args:
      full_log: List of strings representing full output of build.

  Returns:
      A single string of build errors (possibly trimmed).
  """
  # Prefer the parsed failure section; fall back to the whole log.
  errors = _capture_fail_section(full_log) or full_log
  # Keep only the tail so the console isn't flooded.
  if len(errors) >= _FAILED_OUTPUT_LINE_LIMIT:
    errors = errors[-_FAILED_OUTPUT_LINE_LIMIT:]
  return 'Output (may be trimmed):\n%s' % ''.join(errors)
274
275
def stream_io_output(
    io_input: IO,
    max_lines=None,
    full_output_receiver: IO = None,
    io_output: IO = None,
    is_io_output_atty=None,
):
  """Stream an IO output with max number of rolling lines to display if set.

  Args:
      io_input: The file-like object to read the output from.
      max_lines: The maximum number of rolling lines to display. If None, all
        lines will be displayed.
      full_output_receiver: Optional io to receive the full output.
      io_output: The file-like object to write the output to.
      is_io_output_atty: Whether the io_output is a TTY.
  """
  if io_output is None:
    io_output = _original_sys_stdout
  if is_io_output_atty is None:
    is_io_output_atty = _has_colors(io_output)
  # Without a line limit or a TTY there is no rolling display; just relay
  # every line verbatim.
  if not max_lines or not is_io_output_atty:
    for line in iter(io_input.readline, ''):
      if not line:
        break
      if full_output_receiver is not None:
        full_output_receiver.write(
            line if isinstance(line, str) else line.decode('utf-8')
        )
      io_output.write(line)
      io_output.flush()
    return

  term_width, _ = get_terminal_size()
  # Bounded deque keeps only the newest max_lines lines for the display.
  last_lines = deque(maxlen=max_lines)
  is_rolling = True

  def reset_output():
    # Erase the previously rendered rolling block (+2 for header/footer).
    if is_rolling and last_lines:
      io_output.write(_BASH_CLEAR_PREVIOUS_LINE_CODE * (len(last_lines) + 2))

  def write_output(new_lines: list[str]):
    # Re-render the rolling block with header and footer rules.
    if not is_rolling:
      return
    last_lines.extend(new_lines)
    lines = ['========== Rolling subprocess output ==========']
    lines.extend(last_lines)
    lines.append('-----------------------------------------------')
    io_output.write('\n'.join(lines))
    io_output.write('\n')
    io_output.flush()

  original_stdout = sys.stdout
  original_stderr = sys.stderr

  # Serializes rendering between this reader loop and any other thread that
  # prints through the SafeStdout shim below.
  lock = threading.Lock()

  class SafeStdout:
    """stdout shim that interleaves prints with the rolling block safely."""

    def __init__(self):
      self._buffers = []

    def write(self, buf: str) -> None:
      # Buffer partial writes; on a lone newline, clear the rolling block,
      # emit the buffered text above it, then redraw the block.
      if len(buf) == 1 and buf[0] == '\n' and self._buffers:
        with lock:
          reset_output()
          original_stdout.write(''.join(self._buffers))
          original_stdout.write('\n')
          original_stdout.flush()
          write_output([])
          self._buffers.clear()
      else:
        self._buffers.append(buf)

    def flush(self) -> None:
      original_stdout.flush()

  # Temporarily divert stdout/stderr so concurrent prints don't corrupt the
  # rolling display; restored before returning.
  sys.stdout = SafeStdout()
  sys.stderr = sys.stdout

  for line in iter(io_input.readline, ''):
    if not line:
      break
    line = line.decode('utf-8') if isinstance(line, bytes) else line
    if full_output_receiver is not None:
      full_output_receiver.write(line)
    line = line.rstrip().replace('\t', '  ')
    # Split the line if it's longer than the terminal width
    wrapped_lines = (
        [line]
        if len(line) <= term_width
        else [line[i : i + term_width] for i in range(0, len(line), term_width)]
    )
    with lock:
      reset_output()
      write_output(wrapped_lines)

  # Final cleanup: erase the rolling block and reset terminal attributes.
  with lock:
    reset_output()
    is_rolling = False
    io_output.write(_BASH_RESET_CODE)
    io_output.flush()

  sys.stdout = original_stdout
  sys.stderr = original_stderr

  io_input.close()
383
384
def run_limited_output(
    cmd, env_vars=None, shell=False, start_new_session=False
):
  """Runs a given command and streams the output on a single line in stdout.

  Args:
      cmd: A list of strings representing the command to run.
      env_vars: Optional arg. Dict of env vars to set during build.
      shell: Optional arg. Whether to use shell to run the command.
      start_new_session: Optional arg. Whether to start a new session for the
        command.

  Raises:
      subprocess.CalledProcessError: When the command exits with a non-0
          exitcode; its output attribute carries the captured text.
  """
  # Route stderr into stdout so a single pipe carries everything.
  with subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT,
      env=env_vars,
      shell=shell,
      start_new_session=start_new_session,
      text=True,
  ) as proc:
    captured = io.StringIO()
    stream_io_output(
        proc.stdout,
        DEFAULT_OUTPUT_ROLLING_LINES,
        captured,
        _original_sys_stdout,
    )
    exit_code = proc.wait()
    if exit_code:
      raise subprocess.CalledProcessError(exit_code, cmd, captured.getvalue())
423
424
def get_build_out_dir(*joinpaths) -> Path:
  """Get android build out directory.

  The order of the rules are:
  1. OUT_DIR
  2. OUT_DIR_COMMON_BASE
  3. ANDROID_BUILD_TOP/out

  e.g. OUT_DIR='/disk1/out' -> '/disk1/out'
       OUT_DIR='out_dir'    -> '<build_top>/out_dir'

       Assume the branch name is 'aosp-main':
       OUT_DIR_COMMON_BASE='/disk2/out' -> '/disk1/out/aosp-main'
       OUT_DIR_COMMON_BASE='out_dir'    -> '<build_top>/out_dir/aosp-main'

  Returns:
      Absolute Path of the out directory.
  """
  explicit_out = getenv_abs_path('OUT_DIR')
  if explicit_out:
    return explicit_out.joinpath(*joinpaths)

  # https://source.android.com/setup/build/initializing#using-a-separate-output-directory
  tree_name = get_build_top().name
  common_base = getenv_abs_path('OUT_DIR_COMMON_BASE', tree_name)
  if common_base:
    return common_base.joinpath(*joinpaths)

  return get_build_top('out').joinpath(*joinpaths)
454
455
def update_build_env(env: Dict[str, str]):
  """Method that updates build environment variables."""
  # Mutates the module-level mapping in place; no rebinding occurs, so no
  # ``global`` declaration is required.
  for key, value in env.items():
    _BUILD_ENV[key] = value
461
462
def build(build_targets: Set[str]):
  """Shell out and invoke run_build_cmd to make build_targets.

  Args:
      build_targets: A set of strings of build targets to make.

  Returns:
      Boolean of whether build command was successful, True if nothing to
      build.
  """
  if not build_targets:
    logging.debug('No build targets, skipping build.')
    return True

  # Fold the current process environment into the shared build environment.
  update_build_env(os.environ.copy())
  print(
      '\n%s\n%s'
      % (mark_cyan('Building Dependencies...'), ', '.join(build_targets))
  )
  logging.debug('Building Dependencies: %s', ' '.join(build_targets))
  build_cmd = get_build_cmd() + list(build_targets)
  return _run_build_cmd(build_cmd, _BUILD_ENV)
488
489
def _run_build_cmd_with_limited_output(
    cmd: List[str], env_vars: Dict[str, str] = None
) -> None:
  """Runs the build command and streams the output on a single line in stdout.

  Args:
      cmd: A list of strings representing the command to run.
      env_vars: Optional arg. Dict of env vars to set during build.

  Raises:
      subprocess.CalledProcessError: When the command exits with a non-0
          exitcode. Its output carries OUT_DIR/error.log content when
          available, otherwise a trimmed tail of the captured build output.
  """
  try:
    run_limited_output(cmd, env_vars=env_vars)
  except subprocess.CalledProcessError as e:
    # Prefer the focused error log soong writes under OUT_DIR.
    error_log_file = get_build_out_dir('error.log')
    output = ''
    if error_log_file.is_file() and error_log_file.stat().st_size > 0:
      with open(error_log_file, encoding='utf-8') as f:
        output = f.read()
    if not output:
      # e.output is the whole captured stdout as one string; split it into
      # lines so _capture_limited_output trims by lines rather than by
      # characters.
      output = _capture_limited_output(e.output.splitlines(keepends=True))
    raise subprocess.CalledProcessError(e.returncode, e.cmd, output) from e
516
517
def _run_build_cmd(cmd: List[str], env_vars: Dict[str, str]):
  """The main process of building targets.

  Args:
      cmd: A list of soong command.
      env_vars: Dict of environment variables used for build.

  Returns:
      Boolean of whether build command was successful, True if nothing to
      build.
  """
  logging.debug('Executing command: %s', cmd)
  build_profiler = _build_env_profiling()
  try:
    if env_vars.get('BUILD_OUTPUT_MODE') == BuildOutputMode.STREAMED.value:
      print()
      subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
    else:
      # Note that piping stdout forces Soong to switch to 'dumb terminal
      # mode' which only prints completed actions. This gives users the
      # impression that actions are taking longer than they really are.
      # See b/233044822 for more details.
      log_path = get_build_out_dir('verbose.log.gz')
      # Fix: the two string fragments previously joined as "outputmode";
      # a trailing space keeps the message readable.
      print(
          '\n(Build log may not reflect actual status in simple output '
          'mode; check {} for detail after build finishes.)'.format(
              mark_cyan(f'{log_path}')
          ),
          end='',
      )
      _run_build_cmd_with_limited_output(cmd, env_vars=env_vars)
    _send_build_condition_metrics(build_profiler, cmd)
    print_and_log_info('Build successful')
    return True
  except subprocess.CalledProcessError as err:
    print_and_log_error('Build failure when running: %s', ' '.join(cmd))
    if err.output:
      print_and_log_error(err.output)
    return False
557
558
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
  """Return list of args for communication with result server.

  Args:
      for_test_mapping: True if the test run is for Test Mapping to include
        additional reporting args. Default is False. Currently unused: the
        same args are returned regardless of its value.

  Returns:
      constants.RESULT_SERVER_ARGS.
  """
  # Customize test mapping argument here if needed.
  return constants.RESULT_SERVER_ARGS
569
570
def sort_and_group(iterable, key):
  """Sort iterable by key, then group adjacent items with equal keys.

  Returns:
      An itertools.groupby iterator of (key, group) pairs.
  """
  # groupby only merges adjacent equal keys, so the sort is required first.
  ordered = sorted(iterable, key=key)
  return itertools.groupby(ordered, key)
574
575
def is_supported_mainline_module(installed_path: str) -> re.Match:
  """Determine whether the given path is supported.

  Returns:
      A re.Match when the path ends with .apk/.apks/.apex, otherwise None.
  """
  return MAINLINE_MODULES_EXT_RE.search(installed_path)
579
580
def get_test_and_mainline_modules(test_name: str) -> re.Match:
  """Return test name and mainline modules from the given test.

  Matches references shaped like '<test>[modules ending in .apk/.apks/.apex]'
  (see TEST_WITH_MAINLINE_MODULES_RE); the match exposes named groups 'test'
  and 'mainline_modules'. Returns None when test_name has no such suffix.
  """
  return TEST_WITH_MAINLINE_MODULES_RE.match(test_name)
584
585
def is_test_mapping(args):
  """Check if the atest command intends to run tests in test mapping.

  When atest runs tests in test mapping, it must have at most one test
  specified. If a test is specified, it must be started with  `:`,
  which means the test value is a test group name in TEST_MAPPING file, e.g.,
  `:postsubmit`.

  If --host-unit-test-only or --smart-testing-local was applied, it doesn't
  intend to be a test_mapping test.
  If any test mapping options is specified, the atest command must also be
  set to run tests in test mapping files.

  Args:
      args: arg parsed object.

  Returns:
      True if the args indicates atest shall run tests in test mapping. False
      otherwise.
  """
  if args.host_unit_test_only:
    return False
  explicit_test_mapping = any(
      (args.test_mapping, args.include_subdirs, not args.tests)
  )
  if explicit_test_mapping:
    return True
  # A single ':group' reference implicitly selects test-mapping mode.
  return all((len(args.tests) == 1, args.tests[0][0] == ':'))
612
613
def is_atty_terminal() -> bool:
  """Check if the current process is running in a TTY."""
  isatty = getattr(_original_sys_stdout, 'isatty', lambda: False)
  return isatty()
617
618
619def _has_colors(stream):
620  """Check the output stream is colorful.
621
622  Args:
623      stream: The standard file stream.
624
625  Returns:
626      True if the file stream can interpreter the ANSI color code.
627  """
628  # Following from Python cookbook, #475186
629  # Auto color only on TTYs
630  # curses.tigetnum() cannot be used for telling supported color numbers
631  # because it does not come with the prebuilt py3-cmd.
632  return getattr(stream, 'isatty', lambda: False)()
633
634
def colorize(text, color, bp_color=None):
  """Convert to colorful string with ANSI escape code.

  Args:
      text: A string to print.
      color: Foreground(Text) color which is an ANSI code shift for colorful
        print. They are defined in constants_default.py.
      bp_color: Background color which is an ANSI code shift for colorful
        print.

  Returns:
      Colorful string with ANSI escape code, or the original text when the
      original stdout is not a TTY.
  """
  if not _has_colors(_original_sys_stdout):
    return text
  # Foreground (text) colors occupy ANSI range 30-37; backgrounds 40-47.
  # (The original computed text_color identically in both branches.)
  text_color = 30 + color
  background_color = ';%d' % (40 + bp_color) if bp_color else ''
  return '\033[1;%d%sm%s\033[0m' % (text_color, background_color, text)
669
670
def mark_red(text):
  """Return `text` colorized in red via colorize() (no printing)."""
  return colorize(text, constants.RED)
674
675
def mark_yellow(text):
  """Return `text` colorized in yellow via colorize() (no printing)."""
  return colorize(text, constants.YELLOW)
679
680
def mark_green(text):
  """Return `text` colorized in green via colorize() (no printing)."""
  return colorize(text, constants.GREEN)
684
685
def mark_magenta(text):
  """Return `text` colorized in magenta via colorize() (no printing)."""
  return colorize(text, constants.MAGENTA)
689
690
def mark_cyan(text):
  """Return `text` colorized in cyan via colorize() (no printing)."""
  return colorize(text, constants.CYAN)
694
695
def mark_blue(text):
  """Return `text` colorized in blue via colorize() (no printing)."""
  return colorize(text, constants.BLUE)
699
700
def colorful_print(text, color=None, bp_color=None, auto_wrap=True):
  """Print out the text with color.

  Args:
      text: A string to print.
      color: Foreground(Text) color which is an ANSI code shift for colorful
        print. They are defined in constants_default.py.
      bp_color: Background color which is an ANSI code shift for colorful
        print.
      auto_wrap: If True, a trailing newline is emitted.
  """
  message = colorize(text, color, bp_color) if color else text
  line_end = '\n' if auto_wrap else ''
  print(message, end=line_end)
716
717
def _print_to_console(
    prefix: str, msg: Any, *fmt_args: list[Any], color: int = None
) -> None:
  """Format and print a prefixed message to the console.

  Args:
    prefix: Text placed in front of the formatted message.
    msg: The message or %-style format string.
    *fmt_args: Format arguments for the message.
    color: Optional ANSI color shift applied to the whole line.
  """
  if fmt_args:
    try:
      evaluated_msg = msg % fmt_args
    except (TypeError, ValueError):
      # A malformed format string should not crash the caller; report the
      # problem and bail out.
      traceback.print_exc()
      return
  else:
    evaluated_msg = str(msg)
  colorful_print(f'{prefix}{evaluated_msg}', color)
736
737
def print_and_log_error(msg, *fmt_args):
  """Print error message to the console (in red) and log it.

  Args:
    msg: The message or %-style format string to print.
    *fmt_args: Lazy format arguments applied to msg.
  """
  logging.error(msg, *fmt_args)
  _print_to_console('Error: ', msg, *fmt_args, color=constants.RED)
747
748
def print_and_log_warning(msg, *fmt_args):
  """Print warning message to the console (in magenta) and log it.

  Args:
    msg: The message or %-style format string to print.
    *fmt_args: Lazy format arguments applied to msg.
  """
  logging.warning(msg, *fmt_args)
  _print_to_console('Warning: ', msg, *fmt_args, color=constants.MAGENTA)
758
759
def print_and_log_info(msg, *fmt_args):
  """Print info message to the console (cyan prefix) and log it.

  Args:
    msg: The message or %-style format string to print.
    *fmt_args: Lazy format arguments applied to msg.
  """
  logging.info(msg, *fmt_args)
  _print_to_console(mark_cyan('Info: '), msg, *fmt_args)
769
770
def get_terminal_size():
  """Get terminal size and return a tuple.

  Returns:
      2 integers: the size of X(columns) and Y(lines/rows).
  """
  # Fall back to 80x25 when the size cannot be determined (e.g. not a TTY);
  # callers clear this many characters when carriage returning.
  size = shutil.get_terminal_size(
      fallback=(_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT)
  )
  return size.columns, size.lines
783
784
785def _get_hashed_file_name(main_file_name):
786  """Convert the input string to a md5-hashed string.
787
788  If file_extension is
789
790     given, returns $(hashed_string).$(file_extension), otherwise
791     $(hashed_string).cache.
792
793  Args:
794      main_file_name: The input string need to be hashed.
795
796  Returns:
797      A string as hashed file name with .cache file extension.
798  """
799  hashed_fn = hashlib.md5(str(main_file_name).encode())
800  hashed_name = hashed_fn.hexdigest()
801  return hashed_name + '.cache'
802
803
def md5sum(filename):
  """Generate MD5 checksum of a file.

  Args:
      filename: A string or Path of a filename.

  Returns:
      A string of the hashed MD5 checksum, or '' when filename is not a
      regular file.
  """
  filename = Path(filename)
  if not filename.is_file():
    return ''
  md5 = hashlib.md5()
  with open(filename, 'rb') as target:
    # Hash in chunks so large files need not fit in memory at once.
    # ('rb' mode always yields bytes, so no str/bytes check is needed.)
    for chunk in iter(lambda: target.read(1 << 20), b''):
      md5.update(chunk)
  return md5.hexdigest()
821
822
def check_md5(check_file, missing_ok=False):
  """Method equivalent to 'md5sum --check /file/to/check'.

  Args:
      check_file: A string of filename that stores filename and its md5
        checksum.
      missing_ok: A boolean that considers OK even when the check_file does not
        exist. Using missing_ok=True allows ignoring md5 check especially for
        initial run that the check_file has not yet generated. Using
        missing_ok=False ensures the consistency of files, and guarantees the
        process is successfully completed.

  Returns:
      When missing_ok is True (soft check):
        - True if the checksum is consistent with the actual MD5, even the
          check_file is missing or not a valid JSON.
        - False when the checksum is inconsistent with the actual MD5.
      When missing_ok is False (ensure the process completed properly):
        - True if the checksum is consistent with the actual MD5.
        - False otherwise.
  """
  if not Path(check_file).is_file():
    if not missing_ok:
      logging.debug('Unable to verify: %s not found.', check_file)
    return missing_ok
  content = load_json_safely(check_file)
  if not content:
    # Unreadable/invalid JSON counts as a failed verification.
    return False
  for filename, checksum in content.items():
    if md5sum(filename) != checksum:
      logging.debug('%s has altered.', filename)
      return False
  return True
856
857
def save_md5(filenames, save_file):
  """Method equivalent to 'md5sum file1 file2 > /file/to/check'

  Args:
      filenames: A list of filenames.
      save_file: Filename for storing files and their md5 checksums.
  """
  data = {}
  for f in filenames:
    name = Path(f)
    if not name.is_file():
      print_and_log_warning(' ignore %s: not a file.', name)
      # Actually skip non-files. Previously they were still recorded with
      # an empty checksum, which made a later check_md5() pass vacuously
      # for files that do not exist.
      continue
    data[str(name)] = md5sum(name)
  with open(save_file, 'w+', encoding='utf-8') as _file:
    json.dump(data, _file)
873
874
def get_cache_root():
  """Get the root path dir for cache.

  Use branch and target information as cache_root.
  The path will look like:
     $(ANDROID_PRODUCT_OUT)/atest_cache/$CACHE_VERSION

  Returns:
      A string of the path of the root dir of cache.
  """
  # The cache lives inside the build output directory, which is cleaned
  # periodically, so cache files cannot grow without bound; it also sits
  # next to atest_bazel_workspace so users can clean it manually. The
  # product-out basename distinguishes lunch targets sharing a branch.
  # NOTE(review): the fallback default here is the literal env-var *name*
  # (constants.ANDROID_PRODUCT_OUT), not a path — preserved as-is.
  product_out = os.environ.get(
      constants.ANDROID_PRODUCT_OUT, constants.ANDROID_PRODUCT_OUT
  )
  return os.path.join(
      get_build_out_dir(),
      'atest_cache',
      f'ver_{CACHE_VERSION}',
      os.path.basename(product_out),
  )
903
904
def get_test_info_cache_path(test_reference, cache_root=None):
  """Get the cache path of the desired test_infos.

  Args:
      test_reference: A string of the test.
      cache_root: Folder path where caches are stored; defaults to
        get_cache_root().

  Returns:
      A string of the path of test_info cache.
  """
  root = cache_root if cache_root else get_cache_root()
  return os.path.join(root, _get_hashed_file_name(test_reference))
918
919
def update_test_info_cache(test_reference, test_infos, cache_root=None):
  """Pickle a set of test_info objects into a per-reference cache file.

  Args:
      test_reference: A string referencing a test.
      test_infos: A set of TestInfos.
      cache_root: Folder path for saving caches; defaults to
        get_cache_root().
  """
  root = cache_root if cache_root else get_cache_root()
  if not os.path.isdir(root):
    os.makedirs(root)
  cache_path = get_test_info_cache_path(test_reference, root)
  try:
    with open(cache_path, 'wb') as cache_file:
      logging.debug('Saving cache for %s as %s.', test_reference, cache_path)
      pickle.dump(test_infos, cache_file, protocol=2)
  except (pickle.PicklingError, TypeError, IOError) as err:
    # Cache write failures are non-fatal; record them via metrics only.
    logging.debug('Exception raised: %s', err)
    metrics_utils.handle_exc_and_send_exit_event(constants.ACCESS_CACHE_FAILURE)
945
946
def load_test_info_cache(test_reference, cache_root=None):
  """Load cache by test_reference to a set of test_infos object.

  Args:
      test_reference: A string referencing a test.
      cache_root: Folder path for finding caches; defaults to
        get_cache_root().

  Returns:
      A list of TestInfo namedtuple if cache found, else None.
  """
  root = cache_root if cache_root else get_cache_root()
  cache_file = get_test_info_cache_path(test_reference, root)
  if not os.path.isfile(cache_file):
    return None

  logging.debug('Loading cache %s from %s.', test_reference, cache_file)
  try:
    # NOTE(review): pickle.load is only safe because the cache is written
    # locally by atest itself; never point this at untrusted data.
    with open(cache_file, 'rb') as cached:
      return pickle.load(cached, encoding='utf-8')
  except (
      pickle.UnpicklingError,
      ValueError,
      TypeError,
      EOFError,
      IOError,
      ImportError,
  ) as err:
    # A corrupt cache is non-fatal: drop the stale file and record the
    # failure via metrics.
    logging.debug('Exception raised: %s', err)
    os.remove(cache_file)
    metrics_utils.handle_exc_and_send_exit_event(constants.ACCESS_CACHE_FAILURE)
  return None
982
983
def clean_test_info_caches(tests, cache_root=None):
  """Remove the cache files of the given test references.

  Args:
      tests: A list of test references.
      cache_root: Folder path for finding caches.
  """
  cache_root = cache_root or get_cache_root()
  for test in tests:
    cache_file = get_test_info_cache_path(test, cache_root)
    if not os.path.isfile(cache_file):
      continue
    logging.debug('Removing cache: %s', cache_file)
    try:
      os.remove(cache_file)
    except IOError as err:
      # Removal failure is non-fatal; log and report via metrics.
      logging.debug('Exception raised: %s', err)
      metrics_utils.handle_exc_and_send_exit_event(
          constants.ACCESS_CACHE_FAILURE
      )
1004
1005
def get_modified_files(root_dir):
  """Get the git modified files.

  The git path here is git top level of the root_dir. It's inevitable to utilise
  different commands to fulfill 2 scenario:

      1. locate unstaged/staged files
      2. locate committed files but not yet merged.
  the 'git_status_cmd' fulfils the former while the 'find_modified_files'
  fulfils the latter.

  Args:
      root_dir: the root where it starts finding.

  Returns:
      A set of modified files altered since last commit.
  """
  modified_files = set()
  try:
    # TODO: (@jimtang) abandon using git command within Atest.
    # `git rev-parse` prints nothing outside a work tree; stderr is discarded.
    find_git_cmd = f'cd {root_dir}; git rev-parse --show-toplevel 2>/dev/null'
    git_paths = (
        subprocess.check_output(find_git_cmd, shell=True).decode().splitlines()
    )
    for git_path in git_paths:
      # Find modified files from git working tree status.
      # NOTE(review): relies on `repo` being on PATH — TODO confirm behavior
      # for plain git checkouts without a repo workspace.
      git_status_cmd = (
          "repo forall {} -c git status --short | awk '{{print $NF}}'"
      ).format(git_path)
      modified_wo_commit = (
          subprocess.check_output(git_status_cmd, shell=True)
          .decode()
          .rstrip()
          .splitlines()
      )
      for change in modified_wo_commit:
        modified_files.add(os.path.normpath('{}/{}'.format(git_path, change)))
      # Find modified files that are committed but not yet merged.
      find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
      commit_modified_files = (
          subprocess.check_output(find_modified_files, shell=True)
          .decode()
          .splitlines()
      )
      for line in commit_modified_files:
        modified_files.add(os.path.normpath('{}/{}'.format(git_path, line)))
  except (OSError, subprocess.CalledProcessError) as err:
    # Best-effort: on any git/shell failure return what was collected so far.
    logging.debug('Exception raised: %s', err)
  return modified_files
1055
1056
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
  r"""Build a printable delimiter line.

  Args:
      char: A string used for delimiter.
      length: An integer for the replication.
      prenl: Number of '\n' inserted before the delimiter.
      postnl: Number of '\n' inserted after the delimiter.

  Returns:
      A string of delimiter.
  """
  return '\n' * prenl + char * length + '\n' * postnl
1070
1071
def find_files(path, file_name=constants.TEST_MAPPING, followlinks=False):
  """Recursively collect files matching file_name under the given path.

  Args:
      path: A string of path in source.
      file_name: The file name pattern for finding matched files.
      followlinks: A boolean to indicate whether to follow symbolic links.

  Returns:
      A list of paths of the files with the matching name under the given
      path.
  """
  match_files = []
  for root, _, filenames in os.walk(path, followlinks=followlinks):
    try:
      match_files.extend(
          os.path.join(root, name)
          for name in fnmatch.filter(filenames, file_name)
      )
    except re.error as e:
      # A malformed pattern is reported via metrics instead of aborting.
      msg = 'Unable to locate %s among %s' % (file_name, filenames)
      logging.debug(msg)
      logging.debug('Exception: %s', e)
      metrics.AtestExitEvent(
          duration=metrics_utils.convert_duration(0),
          exit_code=ExitCode.COLLECT_ONLY_FILE_NOT_FOUND,
          stacktrace=msg,
          logs=str(e),
      )
  return match_files
1100
1101
def extract_zip_text(zip_path):
  """Concatenate tradefed error/warning lines from text files in a zip.

  Args:
      zip_path: The file path of zip.

  Returns:
      The string in input zip file.
  """
  content = ''
  try:
    with zipfile.ZipFile(zip_path) as zip_file:
      for entry in zip_file.namelist():
        if os.path.isdir(entry):
          continue
        # Force a line break between the contents of separate files.
        content += '\n'
        with zip_file.open(entry) as extracted:
          for raw_line in extracted:
            decoded = raw_line.decode()
            if matched_tf_error_log(decoded):
              content += decoded
  except zipfile.BadZipfile as err:
    logging.debug('Exception raised: %s', err)
  return content
1127
1128
def matched_tf_error_log(content):
  """Check if the input content matches the tradefed error/warning pattern.

  The format looks like:
      05-25 17:37:04 W/ModuleListener ...
      05-25 17:37:04 E/TestRunner ...

  Args:
      content: Log string.

  Returns:
      True if the content matches the regular expression for tradefed error
      or warning log.
  """
  # MM-DD HH:MM:SS followed by an E/ or W/ log tag. The previous pattern
  # '(E|W/)' grouped incorrectly: it accepted a bare 'E' but required 'W/'.
  # Both levels require the '/' separator that tradefed emits.
  reg = (
      r'^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
      r'(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) [EW]/'
  )
  return bool(re.search(reg, content))
1149
1150
def read_test_record(path):
  """Parse a serialized TestRecord proto file.

  Args:
      path: The proto file path.

  Returns:
      The test_record proto instance.
  """
  record = test_record_pb2.TestRecord()
  with open(path, 'rb') as proto_file:
    record.ParseFromString(proto_file.read())
  return record
1164
1165
def has_python_module(module_name):
  """Detect whether a module can be resolved without importing it.

  Args:
      module_name: A string of the tested module name.

  Returns:
      True if found, False otherwise.
  """
  return importlib.util.find_spec(module_name) is not None
1176
1177
def load_json_safely(jsonfile):
  """Load the given json file as an object.

  Args:
      jsonfile: The json file path (str, bytes or os.PathLike).

  Returns:
      The content of the given json file. Null dict when:
      1. the given path doesn't exist.
      2. the given path is not a json or invalid format.
  """
  if isinstance(jsonfile, bytes):
    jsonfile = jsonfile.decode('utf-8')
  if Path(jsonfile).is_file():
    try:
      with open(jsonfile, 'r', encoding='utf-8') as cache:
        return json.load(cache)
    # UnicodeDecodeError: a non-UTF-8 file is "invalid format" too;
    # previously it escaped this handler and crashed the caller.
    except (json.JSONDecodeError, UnicodeDecodeError):
      logging.debug('Exception happened while loading %s.', jsonfile)
  else:
    logging.debug('%s: File not found.', jsonfile)
  return {}
1200
1201
def get_atest_version():
  """Get atest version.

  Returns:
      Version string from the VERSION file, e.g. prebuilt
          2022-11-24_9314547  (<release_date>_<build_id>)

      If VERSION does not exist (src or local built):
          2022-11-24_5d448c50 (<commit_date>_<commit_id>)

      If the git command fails for unexpected reason:
          2022-11-24_unknown  (<today_date>_unknown)
  """
  try:
    with importlib.resources.as_file(
        importlib.resources.files('atest').joinpath('VERSION')
    ) as version_file_path:
      return version_file_path.read_text(encoding='utf-8')
  except (ModuleNotFoundError, FileNotFoundError):
    logging.debug(
        'Failed to load package resource atest/VERSION, possibly due to running'
        ' from atest-dev, atest-src, a prebuilt without embedded launcher, or a'
        ' prebuilt not created by the asuite release tool. Falling back to'
        ' legacy source search.'
    )
    version_file = Path(__file__).resolve().parent.joinpath('VERSION')
    if version_file.is_file():
      # read_text() closes the file; the previous bare open().read()
      # leaked the file handle.
      return version_file.read_text(encoding='utf-8')

  # Try fetching commit date (%ci) and commit hash (%h).
  git_cmd = 'git log -1 --pretty=format:"%ci;%h"'
  try:
    # commit date/hash are only available when running from the source
    # and the local built.
    result = subprocess.run(
        git_cmd,
        shell=True,
        check=False,
        capture_output=True,
        cwd=Path(os.getenv(constants.ANDROID_BUILD_TOP), '').joinpath(
            'tools/asuite/atest'
        ),
    )
    if result.stderr:
      raise subprocess.CalledProcessError(returncode=0, cmd=git_cmd)
    raw_date, commit = result.stdout.decode().split(';')
    date = datetime.datetime.strptime(raw_date, '%Y-%m-%d %H:%M:%S %z').date()
  # atest_dir doesn't exist will throw FileNotFoundError.
  except (subprocess.CalledProcessError, FileNotFoundError):
    # Use today as the commit date for unexpected conditions.
    date = datetime.datetime.today().date()
    commit = 'unknown'
  return f'{date}_{commit}'
1255
1256
def get_manifest_branch(show_aosp=False):
  """Get the manifest branch.

  Args:
      show_aosp: A boolean that shows 'aosp' prefix by checking the 'remote'
        attribute.

  Returns:
      The value of 'revision' of the included xml or default.xml.

      None when no ANDROID_BUILD_TOP or unable to access default.xml.
  """
  #      (portal xml)                            (default xml)
  # +--------------------+ _get_include() +-----------------------------+
  # | .repo/manifest.xml |--------------->| .repo/manifests/default.xml |
  # +--------------------+                +---------------+-------------+
  #                          <default revision="master" |
  #                                   remote="aosp"     | _get_revision()
  #                                   sync-j="4"/>      V
  #                                                 +--------+
  #                                                 | master |
  #                                                 +--------+
  build_top = os.getenv(constants.ANDROID_BUILD_TOP)
  if not build_top:
    return None
  portal_xml = Path(build_top).joinpath('.repo', 'manifest.xml')
  default_xml = Path(build_top).joinpath('.repo/manifests', 'default.xml')

  def _get_revision(xml):
    # Return the 'revision' attribute of the first <default> tag, with an
    # 'aosp-' prefix when show_aosp and remote="aosp"; '' on any failure.
    try:
      xml_root = ET.parse(xml).getroot()
    except (IOError, OSError, ET.ParseError):
      # TODO(b/274989179) Change back to warning once warning if not going
      # to be treat as test failure. Or test_get_manifest_branch unit test
      # could be fix if return None if portal_xml or default_xml not
      # exist.
      logging.info('%s could not be read.', xml)
      return ''
    default_tags = xml_root.findall('./default')
    if default_tags:
      prefix = ''
      for tag in default_tags:
        branch = tag.attrib.get('revision')
        if show_aosp and tag.attrib.get('remote') == 'aosp':
          prefix = 'aosp-'
        # Returns on the first <default> tag; any later tags are ignored.
        return f'{prefix}{branch}'
    return ''

  def _get_include(xml):
    # Resolve the first named <include> of the portal xml to a manifest
    # path; fall back to default_xml when none is found or xml is bad.
    try:
      xml_root = ET.parse(xml).getroot()
    except (IOError, OSError, ET.ParseError):
      # TODO(b/274989179) Change back to warning once warning if not going
      # to be treat as test failure. Or test_get_manifest_branch unit test
      # could be fix if return None if portal_xml or default_xml not
      # exist.
      logging.info('%s could not be read.', xml)
      # Empty Path() never satisfies is_file(), so the caller falls through.
      return Path()
    include_tags = xml_root.findall('./include')
    if include_tags:
      for tag in include_tags:
        name = tag.attrib.get('name')
        if name:
          return Path(build_top).joinpath('.repo/manifests', name)
    return default_xml

  # 1. Try getting revision from .repo/manifests/default.xml
  if default_xml.is_file():
    return _get_revision(default_xml)
  # 2. Try getting revision from the included xml of .repo/manifest.xml
  include_xml = _get_include(portal_xml)
  if include_xml.is_file():
    return _get_revision(include_xml)
  # 3. Try getting revision directly from manifest.xml (unlikely to happen)
  return _get_revision(portal_xml)
1332
1333
def get_build_target():
  """Get the build target from system environment TARGET_PRODUCT."""
  product = os.getenv(constants.ANDROID_TARGET_PRODUCT, None)
  release = os.getenv('TARGET_RELEASE', None)
  variant = os.getenv(constants.TARGET_BUILD_VARIANT, None)
  return f'{product}-{release}-{variant}'
1342
1343
def has_wildcard(test_name):
  """Tell whether the test_name (a string or a list) contains any wildcard
  symbol.

  Args:
      test_name: A list or a str.

  Return:
      True if test_name contains wildcard, False otherwise.
  """
  if isinstance(test_name, str):
    return any(symbol in test_name for symbol in _WILDCARD_CHARS)
  if isinstance(test_name, list):
    return any(has_wildcard(name) for name in test_name)
  return False
1362
1363
def is_build_file(path):
  """Tell whether the input file is an android build file.

  Args:
      path: A string of file path.

  Return:
      True if path is android build file, False otherwise.
  """
  _, extension = os.path.splitext(path)
  return extension in _ANDROID_BUILD_EXT
1374
1375
def quote(input_str):
  """Wrap the string in single quotes when it contains shell-aware chars.

  e.g. unit(test|testing|testing) -> 'unit(test|testing|testing)'

  Args:
      input_str: A string from user input.

  Returns: A string with single quotes if regex chars were detected.
  """
  return f"'{input_str}'" if has_chars(input_str, _REGEX_CHARS) else input_str
1391
1392
def has_chars(input_str, chars):
  """Check if the input string contains any of the designated characters.

  Args:
      input_str: A string from user input.
      chars: An iterable object.

  Returns:
      True if the input string contains one of the special chars.
  """
  return any(char in input_str for char in chars)
1407
1408
def prompt_with_yn_result(msg, default=True):
  """Ask a yes/no question on stdin and return the boolean answer.

  Args:
      msg: The question you want asking.
      default: boolean to True/Yes or False/No

  Returns:
      default value if get KeyboardInterrupt or ValueError exception.
  """
  hint = '[Y/n]: ' if default else '[y/N]: '
  try:
    answer = input(msg + hint)
    return strtobool(answer)
  except (ValueError, KeyboardInterrupt):
    return default
1424
1425
def strtobool(val):
  """Convert a string representation of truth to True or False.

  Args:
      val: a string of input value.

  Returns:
      True when values are 'y', 'yes', 't', 'true', 'on', and '1';
      False when 'n', 'no', 'f', 'false', 'off', and '0'.
      Raises ValueError if 'val' is anything else.
  """
  normalized = val.lower()
  if normalized in ('y', 'yes', 't', 'true', 'on', '1'):
    return True
  if normalized in ('n', 'no', 'f', 'false', 'off', '0'):
    return False
  raise ValueError('invalid truth value %r' % (val,))
1442
1443
def get_android_junit_config_filters(test_config):
  """Collect the junit filter options declared in a test config.

  Args:
      test_config: The path of the test config.

  Returns:
      A dictionary include all the filters in the input config.
  """
  filter_dict = {}
  xml_root = ET.parse(test_config).getroot()
  for tag in xml_root.findall('.//option'):
    name = tag.attrib['name'].strip()
    if name in constants.SUPPORTED_FILTERS:
      # Accumulate every value seen for the same filter name.
      filter_dict.setdefault(name, []).append(tag.attrib['value'].strip())
  return filter_dict
1464
1465
def get_config_parameter(test_config):
  """Collect all parameter values declared in a test config.

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the parameters of the input config.
  """
  xml_root = ET.parse(test_config).getroot()
  return {
      tag.attrib['value'].strip()
      for tag in xml_root.findall('.//option')
      if tag.attrib['name'].strip() == constants.CONFIG_DESCRIPTOR
      and tag.attrib['key'].strip() == constants.PARAMETER_KEY
  }
1486
1487
def get_config_device(test_config):
  """Collect all device names declared in a test config.

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the device name of the input config.
  """
  devices = set()
  try:
    xml_root = ET.parse(test_config).getroot()
    for device_tag in xml_root.findall('.//device'):
      devices.add(device_tag.attrib['name'].strip())
  except ET.ParseError as e:
    # An unparsable config is fatal for the run.
    colorful_print('Config has invalid format.', constants.RED)
    colorful_print('File %s : %s' % (test_config, str(e)), constants.YELLOW)
    sys.exit(ExitCode.CONFIG_INVALID_FORMAT)
  return devices
1509
1510
def get_mainline_param(test_config):
  """Collect all mainline-param values declared in a test config.

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the parameters of the input config.
  """
  xml_root = ET.parse(test_config).getroot()
  return {
      tag.attrib['value'].strip()
      for tag in xml_root.findall('.//option')
      if tag.attrib['name'].strip() == constants.CONFIG_DESCRIPTOR
      and tag.attrib['key'].strip() == constants.MAINLINE_PARAM_KEY
  }
1531
1532
def get_adb_devices():
  """Run `adb devices` and return a list of devices.

  Returns:
      A list of devices. e.g.
      ['127.0.0.1:40623', '127.0.0.1:40625']
  """
  probe_cmd = 'adb devices | egrep -v "^List|^$"||true'
  output = subprocess.check_output(probe_cmd, shell=True).decode()
  # Each line is '<serial>\t<state>'; keep only the serial.
  return [line.split('\t')[0] for line in output.splitlines()]
1543
1544
def get_android_config():
  """Get Android config as "printconfig" shows.

  Returns:
      A dict of Android configurations.
  """
  raw_config = subprocess.check_output(get_build_cmd(dump=True)).decode('utf-8')
  android_config = {}
  for line in raw_config.splitlines():
    if line.startswith('='):
      continue
    key, value = line.split('=', 1)
    # First occurrence of a key wins.
    android_config.setdefault(key, value)
  return android_config
1559
1560
def get_config_gtest_args(test_config):
  """Read gtest's module-name and device-path options from a test config.

  Args:
      test_config: The path of the test config.

  Returns:
      A string of gtest's module name.
      A string of gtest's device path.
  """
  module_name = ''
  device_path = ''
  xml_root = ET.parse(test_config).getroot()
  for option in xml_root.findall('.//option'):
    option_name = option.attrib['name'].strip()
    option_value = option.attrib['value'].strip()
    if option_name == 'native-test-device-path':
      device_path = option_value
    elif option_name == 'module-name':
      module_name = option_value
  return module_name, device_path
1583
1584
def get_arch_name(module_name, is_64=False):
  """Get the arch folder name for the input module.

  Scan the test case folders to get the matched arch folder name.

  Args:
      module_name: The module_name of test
      is_64: If need 64 bit arch name, False otherwise.

  Returns:
      A string of the arch name.
  """
  arch_names = ('arm64', 'x86_64') if is_64 else ('arm', 'x86')
  test_case_root = os.path.join(
      os.environ.get(constants.ANDROID_TARGET_OUT_TESTCASES, ''), module_name
  )
  if not os.path.isdir(test_case_root):
    logging.debug('%s does not exist.', test_case_root)
    return ''
  for entry in os.listdir(test_case_root):
    if entry in arch_names:
      return entry
  return ''
1612
1613
def copy_single_arch_native_symbols(
    symbol_root, module_name, device_path, is_64=False
):
  """Copy symbol files for native tests which belong to the given arch.

  Args:
      symbol_root: Root folder of the product symbols.
      module_name: The module_name of test
      device_path: The device path define in test config.
      is_64: True if need to copy 64bit symbols, False otherwise.
  """
  nativetest_dir = 'nativetest64' if is_64 else 'nativetest'
  src_symbol = os.path.join(symbol_root, 'data', nativetest_dir, module_name)
  dst_symbol = os.path.join(
      symbol_root,
      # device_path is absolute; strip the leading '/' to join under root.
      device_path[1:],
      module_name,
      get_arch_name(module_name, is_64),
  )
  if os.path.isdir(src_symbol):
    # TODO: Use shutil.copytree(src, dst, dirs_exist_ok=True) after
    #  python3.8
    if os.path.isdir(dst_symbol):
      shutil.rmtree(dst_symbol)
    shutil.copytree(src_symbol, dst_symbol)
1639
1640
def copy_native_symbols(module_name, device_path):
  """Copy symbol files for native tests to match with tradefed file structure.

  The original symbols will locate at
  $(PRODUCT_OUT)/symbols/data/nativetest(64)/$(module)/$(stem).
  From TF, the test binary will locate at
  /data/local/tmp/$(module)/$(arch)/$(stem).
  In order to make trace work need to copy the original symbol to
  $(PRODUCT_OUT)/symbols/data/local/tmp/$(module)/$(arch)/$(stem)

  Args:
      module_name: The module_name of test
      device_path: The device path define in test config.
  """
  symbol_root = os.path.join(
      os.environ.get(constants.ANDROID_PRODUCT_OUT, ''), 'symbols'
  )
  if not os.path.isdir(symbol_root):
    logging.debug('Symbol dir:%s not exist, skip copy symbols.', symbol_root)
    return
  # Copy 32 bit symbols then 64 bit symbols.
  for is_64 in (False, True):
    if get_arch_name(module_name, is_64=is_64):
      copy_single_arch_native_symbols(
          symbol_root, module_name, device_path, is_64=is_64
      )
1671
1672
def get_config_preparer_options(test_config, class_name):
  """Collect the option values of a target_preparer in a test config.

  Args:
      test_config: The path of the test config.
      class_name: A string of target_preparer

  Returns:
      A dict mapping option names to values for the given target_preparer.
  """
  xml_root = ET.parse(test_config).getroot()
  option_tags = xml_root.findall(
      './/target_preparer[@class="%s"]/option' % class_name
  )
  return {
      tag.attrib['name'].strip(): tag.attrib['value'].strip()
      for tag in option_tags
  }
1693
1694
def get_verify_key(tests, extra_args):
  """Compose test command key.

  Args:
      tests: A list of input tests.
      extra_args: Dict of extra args to add to test run.

  Returns:
      A composed test commands.
  """
  # test_commands is a concatenated string of sorted test_ref+extra_args.
  # For example, "ITERATIONS=5 hello_world_test"
  # Copy the list: the original code aliased `tests` and the appends below
  # leaked KEY=value entries back into the caller's list.
  test_commands = list(tests)
  for key, value in extra_args.items():
    test_commands.append('%s=%s' % (key, str(value)))
  test_commands.sort()
  return ' '.join(test_commands)
1712
1713
def save_build_files_timestamp():
  """Generate mtime timestamps of all Android.{bp,mk} files.

  The timestamps of build files are stored in
      $ANDROID_HOST_OUT/indices/buildfiles.stp
  """
  plocate_db = get_index_path(constants.LOCATE_CACHE)
  db_exists = plocate_db.is_file()
  logging.debug(
      'Build files timestamp db file %s exists: %s',
      plocate_db,
      db_exists,
  )
  if not db_exists:
    return

  cmd = f'locate -d{plocate_db} --existing ' r'--regex "/Android\.(bp|mk)$"'
  results = subprocess.getoutput(cmd)
  if not results:
    return

  timestamp = {
      build_file: Path(build_file).stat().st_mtime
      for build_file in results.splitlines()
  }
  timestamp_file = get_index_path(constants.BUILDFILES_STP)
  logging.debug('Writing to build files timestamp db %s', timestamp_file)
  with open(timestamp_file, 'w', encoding='utf-8') as _file:
    json.dump(timestamp, _file)
1740
1741
def run_multi_proc(func, *args, **kwargs) -> Process:
  """Spawn func in a new process and return the started Process.

  Args:
      func: The callable used as the process target.
        args/kwargs: check doc page:
      https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

  Returns:
      multiprocessing.Process object.
  """
  process = Process(target=func, *args, **kwargs)
  process.start()
  return process
1756
1757
def start_threading(target, *args, **kwargs) -> Thread:
  """Spawn target in a new thread and return the started Thread.

  Args:
      target: The callable used as the thread target.
        args/kwargs: check doc page:
        https://docs.python.org/3/library/threading.html#threading.Thread

  Returns:
      threading.Thread object.
  """
  worker = Thread(target=target, *args, **kwargs)
  worker.start()
  return worker
1772
1773
def get_prebuilt_sdk_tools_dir():
  """Get the path for the prebuilt sdk tools root dir.

  Returns: The absolute path of prebuilt sdk tools directory.
  """
  host_os = platform.system().lower()
  build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
  return build_top.joinpath('prebuilts/sdk/tools/', host_os, 'bin')
1783
1784
def is_writable(path):
  """Check if the given path (or its closest existing ancestor) is writable.

  Args:
      path: A string of the path to check.

  Returns: True if input path is writable, False otherwise.
  """
  # os.path.dirname() of a bare file name is '' — the old code recursed on
  # '' forever (RecursionError). Treat '' as the current directory.
  if not path:
    path = '.'
  if not os.path.exists(path):
    return is_writable(os.path.dirname(path))
  return os.access(path, os.W_OK)
1793
1794
def get_misc_dir():
  """Get the path for the ATest data root dir.

  Returns: The absolute path of the ATest data root dir.
  """
  home_dir = os.path.expanduser('~')
  # Fall back to the build out dir when $HOME is not writable.
  return home_dir if is_writable(home_dir) else get_build_out_dir()
1804
1805
def get_config_folder() -> Path:
  """Returns the config folder path where upload config is stored."""
  return Path(get_misc_dir(), '.atest')
1809
1810
def get_full_annotation_class_name(module_info, class_name):
  """Get fully qualified class name from a class name.

  If the given keyword(class_name) is "smalltest", this method can search
  among source codes and grep the accurate annotation class name:

      androidx.test.filters.SmallTest

  Args:
      module_info: A dict of module_info.
      class_name: A string of class name.

  Returns:
      A string of fully qualified class name, empty string otherwise.
  """
  # Accepts an exact fully-qualified import of class_name.
  fullname_pattern = re.compile(
      r'import\s+(?P<fqcn>{})(|;)$'.format(class_name), re.I
  )
  # Accepts any import whose last component is class_name.
  keyword_pattern = re.compile(
      r'import\s+(?P<fqcn>.*\.{})(|;)$'.format(class_name), re.I
  )
  build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
  for src in module_info.get(constants.MODULE_SRCS, []):
    with open(build_top.joinpath(src), 'r', encoding='utf-8') as source_file:
      for line in source_file:
        # Exact full class name takes precedence over keyword match.
        match = fullname_pattern.match(line) or keyword_pattern.match(line)
        if match:
          return match.group('fqcn')
  return ''
1846
1847
def has_mixed_type_filters(test_infos):
  """Tell whether any single test module carries more than one filter type.

  Internally maps each module name to the set of filter types seen, e.g.
  {
      'module_1': {'wildcard class_method'},
      'module_2': {'wildcard class_method', 'regular class_method'},
      'module_3': set()
  }

  Args:
      test_infos: A set of TestInfos.

  Returns:
      True if more than one filter type in a test module, False otherwise.
  """
  test_to_types = {}
  for test_info in test_infos:
    accumulated = test_to_types.setdefault(test_info.test_name, set())
    for flt in test_info.data.get(constants.TI_FILTER, []):
      accumulated |= get_filter_types(flt.to_list_of_tf_strings())
  return any(len(types) > 1 for types in test_to_types.values())
1877
1878
def get_filter_types(tf_filter_set):
  """Classify each tradefed filter string into filter types.

  Args:
      tf_filter_set: A list of tf filter strings.

  Returns:
      A set of FilterType.
  """
  matchers = (
      (_WILDCARD_FILTER_RE, FilterType.WILDCARD_FILTER.value),
      (_REGULAR_FILTER_RE, FilterType.REGULAR_FILTER.value),
  )
  type_set = set()
  for tf_filter in tf_filter_set:
    for pattern, filter_type in matchers:
      if pattern.match(tf_filter):
        logging.debug(
            'Filter and type: (%s, %s)',
            tf_filter,
            filter_type,
        )
        type_set.add(filter_type)
  return type_set
1905
1906
def has_command(cmd: str) -> bool:
  """Tell whether the given command resolves to an executable in PATH.

  Args:
      cmd: A string of the tested command.

  Returns:
      True if found, False otherwise.
  """
  # shutil.which returns the resolved path or None when not found.
  return shutil.which(cmd) is not None
1917
1918
# pylint: disable=too-many-branches
def get_bp_content(filename: Path, module_type: str) -> Dict:
  """Get essential content info from an Android.bp.

  By specifying module_type (e.g. 'android_test', 'android_app'), this method
  can parse the given starting point and grab 'name', 'instrumentation_for'
  and 'manifest'.

  Args:
      filename: Path of an Android.bp file.
      module_type: The module block keyword to look for, e.g. 'android_test'.

  Returns:
      A dict of mapping test module and target module; e.g.
      {
       'FooUnitTests':
           {'manifest': 'AndroidManifest.xml', 'target_module': 'Foo'},
       'Foo':
           {'manifest': 'AndroidManifest-common.xml', 'target_module': ''}
      }
      Null dict if there is no content of the given module_type.
  """
  build_file = Path(filename)
  # Require BOTH a .bp suffix and an existing file. The previous `any()`
  # check let a non-existent *.bp path through and crashed in open() below.
  if not (build_file.suffix == '.bp' and build_file.is_file()):
    return {}
  # Raw f-string keeps \s literal for the regex engine.
  start_from = re.compile(rf'^{module_type}\s*\{{')
  end_with = re.compile(r'^\}$')
  context_re = re.compile(
      r'\s*(?P<key>(name|manifest|instrumentation_for))\s*:'
      r'\s*\"(?P<value>.*)\"\s*,',
      re.M,
  )
  with open(build_file, 'r', encoding='utf-8') as cache:
    data = cache.readlines()
  content_dict = {}
  start_recording = False
  _dict = {}
  for _line in data:
    line = _line.strip()
    if start_from.match(line):
      # Entering a block of the requested module type.
      start_recording = True
      _dict = {}
      continue
    if not start_recording:
      continue
    if end_with.match(line):
      # Block finished: commit the collected attributes keyed by name.
      start_recording = False
      module_name = _dict.get('name')
      if module_name:
        content_dict[module_name] = {
            'manifest': _dict.get('manifest', 'AndroidManifest.xml'),
            'target_module': _dict.get('instrumentation_for', ''),
        }
      continue
    match = context_re.match(line)
    if match:
      _dict[match.group('key')] = match.group('value')
  return content_dict
1973
1974
def get_manifest_info(manifest: Path) -> Dict[str, Any]:
  """Get the essential info from the given manifest file.

  This method cares only three attributes:

      * package
      * targetPackage
      * persistent
  For an instrumentation test, the result will be like:
  {
      'package': 'com.android.foo.tests.unit',
      'targetPackage': 'com.android.foo',
      'persistent': False
  }
  For a target module of the instrumentation test:
  {
      'package': 'com.android.foo',
      'targetPackage': '',
      'persistent': True
  }
  """
  info = {'package': '', 'target_package': '', 'persistent': False}
  try:
    root = ET.parse(manifest).getroot()
  except (ET.ParseError, FileNotFoundError):
    # Unreadable/absent manifests yield the defaults.
    return info

  def _first_attr_with(attrs, keyword):
    """Return the first attribute value whose (namespaced) key contains keyword."""
    for key, value in attrs.items():
      if keyword in key:
        return value
    return None

  package_re = re.compile(r'[a-z][\w]+(\.[\w]+)*')
  # 1. 'package' must be probed from the top-level <manifest> element.
  for node in root.findall('.'):
    pkg = node.attrib.get('package')
    if pkg and package_re.match(pkg):
      info['package'] = pkg
      break
  for node in root.findall('*'):
    # 2. 'targetPackage' lives on the <instrumentation> element.
    if node.tag == 'instrumentation':
      target = _first_attr_with(node.attrib, 'targetPackage')
      if target is not None:
        info['target_package'] = target
    # 3. 'persistent' may appear on any element.
    persistent = _first_attr_with(node.attrib, 'persistent')
    if persistent is not None:
      info['persistent'] = persistent.lower() == 'true'
  return info
2023
2024
# pylint: disable=broad-except
def generate_result_html(result_file: Path) -> Path:
  """Generate a html page that links every log file under the result dir.

  Returns:
      The Path of the generated html, or None when generation failed.
  """
  result_file = Path(result_file)
  search_dir = result_file.parent
  result_html = search_dir.joinpath('local_log_file_list.html')
  try:
    logs = sorted(find_files(str(search_dir), file_name='*', followlinks=True))
    with open(result_html, 'w', encoding='utf-8') as html_file:
      html_file.write('<!DOCTYPE html><html><body>')
      result = load_json_safely(result_file)
      if result:
        # Title the page with the original atest invocation and its time.
        html_file.write(f'<h1>{"atest " + result.get("args")}</h1>')
        timestamp = datetime.datetime.fromtimestamp(result_file.stat().st_ctime)
        html_file.write(f'<h2>{timestamp}</h2>')
      for log in logs:
        link_text = html.escape(Path(log).relative_to(search_dir).as_posix())
        html_file.write(
            f'<p><a href="{urllib.parse.quote(log)}">{link_text}</a></p>'
        )
      html_file.write('</body></html>')
    send_tradeded_elapsed_time_metric(search_dir)
    return result_html
  except Exception as e:
    logging.debug('Did not generate log html for reason: %s', e)
    return None
2051
2052
def send_tradeded_elapsed_time_metric(search_dir: Path):
  """Report Tradefed total/prep/test/teardown elapsed times to metrics."""
  test, prep, teardown = get_tradefed_invocation_time(search_dir)
  # Emit one event per detect type, total first to match the original order.
  events = (
      (DetectType.TF_TOTAL_RUN_MS, test + prep + teardown),
      (DetectType.TF_PREPARATION_MS, prep),
      (DetectType.TF_TEST_MS, test),
      (DetectType.TF_TEARDOWN_MS, teardown),
  )
  for detect_type, elapsed in events:
    metrics.LocalDetectEvent(detect_type=detect_type, result=elapsed)
2066
2067
def get_tradefed_invocation_time(search_dir: Path) -> Tuple[int, int, int]:
  """Return a tuple of testing, preparation and teardown time.

  Parses every end_host_log_*.txt under search_dir. The relevant part of
  such a log looks like:

      =============== Consumed Time ==============
          x86_64 HelloWorldTests: 1s
          x86_64 hallo-welt: 866 ms
      Total aggregated tests run time: 1s
      ============== Modules Preparation Times ==============
          x86_64 HelloWorldTests => prep = 2483 ms || clean = 294 ms
          x86_64 hallo-welt => prep = 1845 ms || clean = 292 ms
      Total preparation time: 4s  ||  Total tear down time: 586 ms
      =======================================================

  Args:
      search_dir: Directory searched for Tradefed end-host logs.

  Returns:
      A (test, prep, teardown) tuple of accumulated milliseconds.
  """
  test, prep, teardown = 0, 0, 0
  end_host_log_files = find_files(
      path=search_dir, file_name='end_host_log_*.txt', followlinks=True
  )
  for log in end_host_log_files:
    with open(log, 'r', encoding='utf-8') as log_file:
      lines = log_file.read().splitlines()

    # Track which report section the parser is currently inside.
    section = None  # 'test', 'prep', or None.
    for line in lines:
      if re.match(r'[=]+.*consumed.*time.*[=]+', line, re.I):
        section = 'test'
        continue
      if re.match(r'[=]+.*preparation.*time.*[=]+', line, re.I):
        section = 'prep'
        continue
      # A line starting with `Total` closes the per-module listing.
      if re.match(r'^(Total.*)', line, re.I):
        section = None
        continue
      if section == 'test':
        match = re.search(r'^[\s]+\w.*:\s+(?P<timestr>.*)$', line, re.I)
        if match:
          test += convert_timestr_to_ms(match.group('timestr'))
      elif section == 'prep':
        # SuiteResultReporter.java defines elapsed prep time only in ms.
        match = re.search(
            r'prep = (?P<prep>\d+ ms) \|\| clean = (?P<clean>\d+ ms)$',
            line,
            re.I,
        )
        if match:
          prep += convert_timestr_to_ms(match.group('prep'))
          teardown += convert_timestr_to_ms(match.group('clean'))

  return test, prep, teardown
2127
2128
def convert_timestr_to_ms(time_string: str = None) -> int:
  """Convert time string to an integer in millisecond.

  Possible time strings are:
      1h 21m 15s
      1m 5s
      25s
  If elapsed time is less than 1 sec, the time will be in millisecond.
      233 ms

  Args:
      time_string: The elapsed-time string to parse; None or '' yields 0.

  Returns:
      The elapsed time in milliseconds. Returns 0 for empty or
      unrecognized strings.
  """
  if not time_string:
    return 0

  # Initialize every part up front: the previous version left
  # `milliseconds` unbound (NameError at the return) whenever the
  # string did not match the pattern.
  hours, minutes, seconds, milliseconds = 0, 0, 0, 0
  # Extract hour(<h>), minute(<m>), second(<s>), or millisecond(<ms>).
  match = re.match(
      r'(((?P<h>\d+)h\s+)?(?P<m>\d+)m\s+)?(?P<s>\d+)s|(?P<ms>\d+)\s*ms',
      time_string,
  )
  if match:
    hours = int(match.group('h')) if match.group('h') else 0
    minutes = int(match.group('m')) if match.group('m') else 0
    seconds = int(match.group('s')) if match.group('s') else 0
    milliseconds = int(match.group('ms')) if match.group('ms') else 0

  return (
      hours * 3600 * 1000 + minutes * 60 * 1000 + seconds * 1000 + milliseconds
  )
2157
2158
# pylint: disable=broad-except
def prompt_suggestions(result_file: Path):
  """Scan result logs for known keywords and print matching suggestions."""
  log_dir = Path(result_file).parent.joinpath('log')
  for log in sorted(find_files(str(log_dir), file_name='*')):
    for keyword, suggestion in SUGGESTIONS.items():
      try:
        content = Path(log).read_text(encoding='utf-8')
        if keyword in content:
          colorful_print('[Suggestion] ' + suggestion, color=constants.RED)
          break
      # Non-text logs cannot be decoded; silently skip them.
      except Exception:
        pass
2176
2177
# pylint: disable=invalid-name
def get_rbe_and_customized_out_state() -> int:
  """Return decimal state of RBE and customized out.

  Customizing out dir (OUT_DIR/OUT_DIR_COMMON_BASE) dramatically slows down
  the RBE performance; by collecting the combined state of the two states,
  we can profile the performance relationship between RBE and the build time.

  Encoding (RBE bit, out_dir bit -> decimal):
      0, 0 -> 0
      0, 1 -> 1
      1, 0 -> 2
      1, 1 -> 3  --> Caution for poor performance.

  Returns:
      An integer that describes the combined state.
  """
  ON, OFF = '1', '0'
  # Bit 1: RBE was enabled iff soong.log mentions USE_RBE=true
  # (grep exits 0 on a match).
  actual_out_dir = get_build_out_dir()
  soong_log = actual_out_dir.joinpath('soong.log')
  grep_cmd = f'grep -q USE_RBE=true {soong_log}'.split()
  rbe_state = ON if subprocess.call(grep_cmd) == 0 else OFF

  # Bit 0: the out dir is customized iff it differs from <top>/out.
  default_out_dir = Path(os.getenv(constants.ANDROID_BUILD_TOP), 'out')
  customized_out = OFF if actual_out_dir == default_out_dir else ON

  return int(rbe_state + customized_out, 2)
2210
2211
def build_files_integrity_is_ok() -> bool:
  """Return Whether the integrity of build files is OK.

  The check fails when (a) the timestamp file or the locate cache is
  missing, (b) the number of Android.bp/Android.mk files has changed, or
  (c) any recorded build file has a different mtime than recorded.

  Returns:
      True when all build files are consistent with the recorded state.
  """
  # 0. Missing timestamp file or plocate.db means a fresh repo sync.
  timestamp_file = get_index_path(constants.BUILDFILES_STP)
  locate_cache = get_index_path(constants.LOCATE_CACHE)
  if not timestamp_file.is_file():
    logging.debug('timestamp_file %s is missing', timestamp_file)
    return False
  if not locate_cache.is_file():
    logging.debug('locate_cache file %s is missing', locate_cache)
    return False

  # Parse the recorded timestamps once; the previous version loaded and
  # parsed the same JSON file twice.
  recorded_timestamps = load_json_safely(timestamp_file)

  # 1. Ensure no build files were added/deleted.
  recorded_amount = len(recorded_timestamps)
  cmd_out = subprocess.getoutput(
      f'locate -e -d{locate_cache} --regex ' r'"/Android\.(bp|mk)$" | wc -l'
  )
  if int(cmd_out) != recorded_amount:
    logging.debug(
        'Some build files are added/deleted. Recorded number of files: %s,'
        ' actual: %s',
        recorded_amount,
        cmd_out,
    )
    return False

  # 2. Ensure the consistency of all build files.
  for file, timestamp in recorded_timestamps.items():
    build_file = Path(file)
    if not build_file.exists():
      continue
    # stat() once per file instead of twice.
    actual_mtime = build_file.stat().st_mtime
    if actual_mtime != timestamp:
      logging.debug(
          'A build file is changed: %s. Recorded timestamp: %s, actual'
          ' timestamp: %s',
          file,
          timestamp,
          actual_mtime,
      )
      return False
  return True
2250
2251
def _build_env_profiling() -> BuildEnvProfiler:
  """Snapshot the build environment before the build starts.

  The BuildEnvProfiler object can help use determine whether a build is:
      1. clean build. (empty out/ dir)
      2. Build files Integrity (Android.bp/Android.mk changes).
      3. Environment variables consistency.
      4. New Ninja file generated. (mtime of soong/build.ninja)

  Returns:
      the BuildProfile object.
  """
  out_dir = get_build_out_dir()
  ninja_file = out_dir.joinpath('soong/build.ninja')
  variables_file = out_dir.joinpath('soong/soong.environment.used.build')
  # Record the current ninja mtime so a later comparison can detect
  # whether a new ninja file was generated during the build.
  ninja_mtime = ninja_file.stat().st_mtime if ninja_file.is_file() else 0

  return BuildEnvProfiler(
      ninja_file=ninja_file,
      ninja_file_mtime=ninja_mtime,
      variable_file=variables_file,
      variable_file_md5=md5sum(variables_file),
      clean_out=not ninja_file.exists(),
      build_files_integrity=build_files_integrity_is_ok(),
  )
2277
2278
def _send_build_condition_metrics(
    build_profile: BuildEnvProfiler, cmd: List[str]
):
  """Send build conditions by comparing build env profilers.

  Args:
      build_profile: The BuildEnvProfiler snapshot taken before the build.
      cmd: The build command; when building only module-info.json,
          'module-info.json' appears in its last element.
  """

  # When build module-info.json only, 'module-info.json' will be the last
  # element. Read it with cmd[-1] instead of cmd.pop() so the caller's
  # list is not mutated as a side effect (and an empty list is tolerated).
  m_mod_info_only = bool(cmd) and 'module-info.json' in cmd[-1]

  def ninja_file_is_changed(env_profiler: BuildEnvProfiler) -> bool:
    """Determine whether the ninja file had been renewal."""
    if not env_profiler.ninja_file.is_file():
      return True
    return (
        env_profiler.ninja_file.stat().st_mtime != env_profiler.ninja_file_mtime
    )

  def env_var_is_changed(env_profiler: BuildEnvProfiler) -> bool:
    """Determine whether soong-related variables had changed."""
    return md5sum(env_profiler.variable_file) != env_profiler.variable_file_md5

  def send_data(detect_type, value=1):
    """A simple wrapper of metrics.LocalDetectEvent."""
    metrics.LocalDetectEvent(detect_type=detect_type, result=value)

  def pick(mod_info_type, build_type):
    """Select the module-info or full-build counter per build mode."""
    return mod_info_type if m_mod_info_only else build_type

  send_data(DetectType.RBE_STATE, get_rbe_and_customized_out_state())

  # Determine the correct detect type before profiling.
  # (build module-info.json or build dependencies.)
  clean_out = pick(DetectType.MODULE_INFO_CLEAN_OUT, DetectType.BUILD_CLEAN_OUT)
  ninja_generation = pick(
      DetectType.MODULE_INFO_GEN_NINJA, DetectType.BUILD_GEN_NINJA
  )
  bpmk_change = pick(
      DetectType.MODULE_INFO_BPMK_CHANGE, DetectType.BUILD_BPMK_CHANGE
  )
  env_change = pick(
      DetectType.MODULE_INFO_ENV_CHANGE, DetectType.BUILD_ENV_CHANGE
  )
  src_change = pick(
      DetectType.MODULE_INFO_SRC_CHANGE, DetectType.BUILD_SRC_CHANGE
  )
  other = pick(DetectType.MODULE_INFO_OTHER, DetectType.BUILD_OTHER)
  incremental = pick(
      DetectType.MODULE_INFO_INCREMENTAL, DetectType.BUILD_INCREMENTAL
  )

  if build_profile.clean_out:
    send_data(clean_out)
  else:
    send_data(incremental)

  if ninja_file_is_changed(build_profile):
    send_data(ninja_generation)

  # 'other' is reported only when none of the specific causes fired.
  other_condition = True
  if not build_profile.build_files_integrity:
    send_data(bpmk_change)
    other_condition = False
  if env_var_is_changed(build_profile):
    send_data(env_change)
    other_condition = False
  if bool(get_modified_files(os.getcwd())):
    send_data(src_change)
    other_condition = False
  if other_condition:
    send_data(other)
2364