#!/usr/bin/env vpython3
#
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

from __future__ import absolute_import
import argparse
import collections
import contextlib
import io
import itertools
import logging
import os
import re
import shlex
import shutil
import signal
import sys
import tempfile
import threading
import traceback
import unittest

# Import _strptime before threaded code. datetime.datetime.strptime is
# threadsafe except for the initial import of the _strptime module.
# See http://crbug.com/724524 and https://bugs.python.org/issue7980.
import _strptime  # pylint: disable=unused-import

# pylint: disable=ungrouped-imports
from pylib.constants import host_paths

if host_paths.DEVIL_PATH not in sys.path:
  sys.path.append(host_paths.DEVIL_PATH)

from devil import base_error
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper

from pylib import constants
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import output_manager
from pylib.base import output_manager_factory
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.results import json_results
from pylib.results import report_results
from pylib.results.presentation import test_results_presentation
from pylib.utils import local_utils
from pylib.utils import logdog_helper
from pylib.utils import logging_utils
from pylib.utils import test_filter

from py_utils import contextlib_ext

from lib.proto import exception_recorder
from lib.results import result_sink

_DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join(
    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json'))

_RERUN_FAILED_TESTS_FILE = 'rerun_failed_tests.filter'


def _RealPath(arg):
  if arg.startswith('//'):
    arg = os.path.abspath(os.path.join(host_paths.DIR_SOURCE_ROOT,
                                       arg[2:].replace('/', os.sep)))
  return os.path.realpath(arg)


def AddTestLauncherOptions(parser):
  """Adds arguments mirroring //base/test/launcher.

  Args:
    parser: The parser to which arguments should be added.
  Returns:
    The given parser.
  """
  parser.add_argument(
      '--test-launcher-retry-limit',
      '--test_launcher_retry_limit',
      '--num_retries', '--num-retries',
      '--isolated-script-test-launcher-retry-limit',
      dest='num_retries', type=int, default=2,
      help='Number of retries for a test before '
           'giving up (default: %(default)s).')
  parser.add_argument(
      '--test-launcher-summary-output',
      '--json-results-file',
      dest='json_results_file', type=os.path.realpath,
      help='If set, will dump results in JSON form to the specified file. '
           'Note that this will also trigger saving per-test logcats to '
           'logdog.')
  parser.add_argument(
      '--test-launcher-shard-index',
      type=int, default=os.environ.get('GTEST_SHARD_INDEX', 0),
      help='Index of the external shard to run.')
  parser.add_argument(
      '--test-launcher-total-shards',
      type=int, default=os.environ.get('GTEST_TOTAL_SHARDS', 1),
      help='Total number of external shards.')

  test_filter.AddFilterOptions(parser)

  return parser


def AddCommandLineOptions(parser):
  """Adds arguments to support passing command-line flags to the device."""
  parser.add_argument(
      '--device-flags-file',
      type=os.path.realpath,
      help='The relative filepath to a file containing '
           'command-line flags to set on the device')
  parser.add_argument(
      '--use-apk-under-test-flags-file',
      action='store_true',
      help='Whether to use the flags file for the apk under test. If set, '
           "the filename will be looked up in the APK's PackageInfo.")
  parser.add_argument('--variations-test-seed-path',
                      type=os.path.relpath,
                      default=None,
                      help='Path to variations seed file.')
  parser.add_argument('--webview-variations-test-seed-path',
                      type=os.path.relpath,
                      default=None,
                      help='Path to variations seed file for WebView.')

  parser.set_defaults(allow_unknown=True)
  parser.set_defaults(command_line_flags=None)


def AddTracingOptions(parser):
  # TODO(shenghuazhang): Move this into AddCommonOptions once it's supported
  # for all test types.
  parser.add_argument(
      '--trace-output',
      metavar='FILENAME', type=os.path.realpath,
      help='Path to save test_runner trace json output to.')

  parser.add_argument(
      '--trace-all',
      action='store_true',
      help='Whether to trace all function calls.')


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = parser.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug',
      action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help='If set, run test suites under out/Debug. '
           'Default is env var BUILDTYPE or Debug.')
  debug_or_release_group.add_argument(
      '--release',
      action='store_const', const='Release', dest='build_type',
      help='If set, run test suites under out/Release. '
           'Default is env var BUILDTYPE or Debug.')

  parser.add_argument(
      '--break-on-failure', '--break_on_failure',
      dest='break_on_failure', action='store_true',
      help='Whether to break on failure.')

  # TODO(jbudorick): Remove this once everything has switched to platform
  # mode.
  parser.add_argument(
      '--enable-platform-mode',
      action='store_true',
      help='Run the test scripts in platform mode, which '
           'conceptually separates the test runner from the '
           '"device" (local or remote, real or emulated) on '
           'which the tests are running. [experimental]')

  parser.add_argument(
      '-e', '--environment',
      default='local', choices=constants.VALID_ENVIRONMENTS,
      help='Test environment to run in (default: %(default)s).')

  parser.add_argument(
      '--local-output',
      action='store_true',
      help='Whether to archive test output locally and generate '
           'a local results detail page.')

  parser.add_argument('--list-tests',
                      action='store_true',
                      help='List available tests and exit.')

  parser.add_argument('--wrapper-script-args',
                      help='A string of args that were passed to the wrapper '
                           'script. This should probably not be edited by a '
                           'user as it is passed by the wrapper itself.')

  class FastLocalDevAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      namespace.enable_concurrent_adb = True
      namespace.enable_device_cache = True
      namespace.extract_test_list_from_filter = True
      namespace.local_output = True
      namespace.num_retries = 0
      namespace.skip_clear_data = True
      namespace.use_persistent_shell = True

  parser.add_argument(
      '--fast-local-dev',
      type=bool,
      nargs=0,
      action=FastLocalDevAction,
      help='Alias for: --num-retries=0 --enable-device-cache '
           '--enable-concurrent-adb --skip-clear-data '
           '--extract-test-list-from-filter --use-persistent-shell '
           '--local-output')

  # TODO(jbudorick): Remove this once downstream bots have switched to
  # api.test_results.
  parser.add_argument(
      '--flakiness-dashboard-server',
      dest='flakiness_dashboard_server',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--gs-results-bucket',
      help='Google Storage bucket to upload results to.')

  parser.add_argument(
      '--output-directory',
      dest='output_directory', type=os.path.realpath,
      help='Path to the directory in which build files are located '
           '(must include build type). This will take precedence over '
           '--debug and --release')
  parser.add_argument(
      '-v', '--verbose',
      dest='verbose_count', default=0, action='count',
      help='Verbose level (multiple times for more)')

  parser.add_argument(
      '--repeat', '--gtest_repeat', '--gtest-repeat',
      '--isolated-script-test-repeat',
      dest='repeat', type=int, default=0,
      help='Number of times to repeat the specified set of tests.')

  # Not useful for junit tests.
  parser.add_argument(
      '--use-persistent-shell',
      action='store_true',
      help='Uses a persistent shell connection for the adb connection.')

  parser.add_argument('--disable-test-server',
                      action='store_true',
                      help="Disables SpawnedTestServer, which doesn't work "
                           'with remote adb. '
                           'WARNING: Will break tests which require the '
                           'server.')

  # This is currently only implemented for gtests and instrumentation tests.
  parser.add_argument(
      '--gtest_also_run_disabled_tests', '--gtest-also-run-disabled-tests',
      '--isolated-script-test-also-run-disabled-tests',
      dest='run_disabled', action='store_true',
      help='Also run disabled tests if applicable.')

  # These are currently only implemented for gtests.
  parser.add_argument('--isolated-script-test-output',
                      help='If present, store test results on this path.')
  parser.add_argument('--isolated-script-test-perf-output',
                      help='If present, store chartjson results on this path.')
  parser.add_argument('--timeout-scale',
                      type=float,
                      help='Factor by which timeouts should be scaled.')

  AddTestLauncherOptions(parser)


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count, add_handler=False)
  if args.verbose_count > 0:
    handler = logging_utils.ColorStreamHandler()
  else:
    handler = logging.StreamHandler(sys.stdout)
  handler.setFormatter(run_tests_helper.CustomFormatter())
  logging.getLogger().addHandler(handler)

  constants.SetBuildType(args.build_type)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""

  parser = parser.add_argument_group('device arguments')

  parser.add_argument(
      '--adb-path',
      type=os.path.realpath,
      help='Specify the absolute path of the adb binary that '
           'should be used.')
  parser.add_argument(
      '--use-local-devil-tools',
      action='store_true',
      help='Use locally built versions of tools used by devil_chromium.')
  parser.add_argument('--denylist-file',
                      type=os.path.realpath,
                      help='Device denylist file.')
  parser.add_argument(
      '-d', '--device', nargs='+',
      dest='test_devices',
      help='Target device(s) for the test suite to run on.')
  parser.add_argument(
      '--enable-concurrent-adb',
      action='store_true',
      help='Run multiple adb commands at the same time, even '
           'for the same device.')
  parser.add_argument(
      '--enable-device-cache',
      action='store_true',
      help='Cache device state to disk between runs')
  parser.add_argument('--list-data',
                      action='store_true',
                      help='List files pushed to device and exit.')
  parser.add_argument('--skip-clear-data',
                      action='store_true',
                      help='Do not wipe app data between tests. Use this to '
                           'speed up local development and never on bots '
                           '(increases flakiness)')
  parser.add_argument(
      '--recover-devices',
      action='store_true',
      help='Attempt to recover devices prior to the final retry. Warning: '
           'this will cause all devices to reboot.')

  parser.add_argument(
      '--upload-logcats-file',
      action='store_true',
      dest='upload_logcats_file',
      help='Whether to upload logcat file to logdog.')

  logcat_output_group = parser.add_mutually_exclusive_group()
  logcat_output_group.add_argument(
      '--logcat-output-dir', type=os.path.realpath,
      help='If set, will dump logcats recorded during test run to directory. '
           'File names will be the device ids with timestamps.')
  logcat_output_group.add_argument(
      '--logcat-output-file', type=os.path.realpath,
      help='If set, will merge logcats recorded during test run and dump them '
           'to the specified file.')

  parser.add_argument(
      '--force-main-user',
      action='store_true',
      help='Force the applicable adb commands to run with "--user" param set '
           'to the id of the main user on device. Only use when the main user '
           'is a secondary user, e.g. Android Automotive OS.')


def AddEmulatorOptions(parser):
  """Adds emulator-specific options to |parser|."""
  parser = parser.add_argument_group('emulator arguments')

  parser.add_argument(
      '--avd-config',
      type=os.path.realpath,
      help='Path to the avd config textpb. '
           '(See //tools/android/avd/proto/ for message definition '
           'and existing textpb files.)')
  parser.add_argument(
      '--emulator-count',
      type=int,
      default=1,
      help='Number of emulators to use.')
  parser.add_argument(
      '--emulator-window',
      action='store_true',
      default=False,
      help='Enable graphical window display on the emulator.')
  parser.add_argument(
      '--emulator-debug-tags',
      help='Comma-separated list of debug tags. This can be used to enable or '
           'disable debug messages from specific parts of the emulator, e.g. '
           'init,snapshot. See "emulator -help-debug-tags" '
           'for a full list of tags.')
  parser.add_argument(
      '--emulator-enable-network',
      action='store_true',
      help='Enable the network (WiFi and mobile data) on the emulator.')


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  parser = parser.add_argument_group('gtest arguments')

  parser.add_argument(
      '--additional-apk',
      action='append', dest='additional_apks', default=[],
      type=_RealPath,
      help='Additional apk that must be installed on '
           'the device when the tests are run.')
  parser.add_argument(
      '--app-data-file',
      action='append', dest='app_data_files',
      help='A file path relative to the app data directory '
           'that should be saved to the host.')
  parser.add_argument(
      '--app-data-file-dir',
      help='Host directory to which app data files will be '
           'saved. Used with --app-data-file.')
  parser.add_argument(
      '--enable-xml-result-parsing',
      action='store_true', help=argparse.SUPPRESS)
  parser.add_argument(
      '--executable-dist-dir',
      type=os.path.realpath,
      help="Path to executable's dist directory for native "
           '(non-apk) tests.')
  parser.add_argument(
      '--extract-test-list-from-filter',
      action='store_true',
      help='When a test filter is specified, and the list of '
           'tests can be determined from it, skip querying the '
           'device for the list of all tests. Speeds up local '
           'development, but is not safe to use on bots '
           '(http://crbug.com/549214).')
  parser.add_argument(
      '--gs-test-artifacts-bucket',
      help=('If present, test artifacts will be uploaded to this Google '
            'Storage bucket.'))
  parser.add_argument(
      '--render-test-output-dir',
      help='If present, store rendering artifacts in this path.')
  parser.add_argument(
      '--runtime-deps-path',
      dest='runtime_deps_path', type=os.path.realpath,
      help='Runtime data dependency file from GN.')
  parser.add_argument(
      '-t', '--shard-timeout',
      dest='shard_timeout', type=int, default=120,
      help='Timeout to wait for each test (default: %(default)s).')
  parser.add_argument(
      '--store-tombstones',
      dest='store_tombstones', action='store_true',
      help='Add tombstones in results if crash.')
  parser.add_argument(
      '-s', '--suite',
      dest='suite_name', nargs='+', metavar='SUITE_NAME', required=True,
      help='Executable name of the test suite to run.')
  parser.add_argument(
      '--test-apk-incremental-install-json',
      type=os.path.realpath,
      help='Path to install json for the test apk.')
  parser.add_argument('--test-launcher-batch-limit',
                      dest='test_launcher_batch_limit',
                      type=int,
                      help='The max number of tests to run in a shard. '
                           'Ignores non-positive ints and those greater than '
                           'MAX_SHARDS')
  parser.add_argument(
      '-w', '--wait-for-java-debugger', action='store_true',
      help='Wait for java debugger to attach before running any application '
           'code. Also disables test timeouts and sets retries=0.')
  parser.add_argument(
      '--coverage-dir',
      type=os.path.realpath,
      help='Directory in which to place all generated coverage files.')
  parser.add_argument(
      '--use-existing-test-data',
      action='store_true',
      help='Do not push new files to the device, instead using existing APK '
           'and test data. Only use when running the same test for multiple '
           'iterations.')
  # This is currently only implemented for gtest tests.
  parser.add_argument('--gtest_also_run_pre_tests',
                      '--gtest-also-run-pre-tests',
                      dest='run_pre_tests',
                      action='store_true',
                      help='Also run PRE_ tests if applicable.')


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser = parser.add_argument_group('instrumentation arguments')

  parser.add_argument('--additional-apex',
                      action='append',
                      dest='additional_apexs',
                      default=[],
                      type=_RealPath,
                      help='Additional apex that must be installed on '
                           'the device when the tests are run')
  parser.add_argument(
      '--additional-apk',
      action='append', dest='additional_apks', default=[],
      type=_RealPath,
      help='Additional apk that must be installed on '
           'the device when the tests are run')
  parser.add_argument('--forced-queryable-additional-apk',
                      action='append',
                      dest='forced_queryable_additional_apks',
                      default=[],
                      type=_RealPath,
                      help='Configures an additional-apk to be forced '
                           'to be queryable by other APKs.')
  parser.add_argument('--instant-additional-apk',
                      action='append',
                      dest='instant_additional_apks',
                      default=[],
                      type=_RealPath,
                      help='Configures an additional-apk to be an instant APK')
  parser.add_argument(
      '-A', '--annotation',
      dest='annotation_str',
      help='Comma-separated list of annotations. Run only tests with any of '
           'the given annotations. An annotation can be either a key or a '
           'key-values pair. A test that has no annotation is considered '
           '"SmallTest".')
  # TODO(jbudorick): Remove support for name-style APK specification once
  # bots are no longer doing it.
  parser.add_argument(
      '--apk-under-test',
      help='Path or name of the apk under test.')
  parser.add_argument(
      '--store-data-dependencies-in-temp',
      action='store_true',
      help='Store data dependencies in /data/local/tmp/chromium_tests_root')
  parser.add_argument(
      '--module',
      action='append',
      dest='modules',
      help='Specify Android App Bundle modules to install in addition to the '
           'base module.')
  parser.add_argument(
      '--fake-module',
      action='append',
      dest='fake_modules',
      help='Specify Android App Bundle modules to fake install in addition '
           'to the real modules.')
  parser.add_argument(
      '--additional-locale',
      action='append',
      dest='additional_locales',
      help='Specify locales in addition to the device locale to install '
           'splits for when --apk-under-test is an Android App Bundle.')
  parser.add_argument(
      '--coverage-dir',
      type=os.path.realpath,
      help='Directory in which to place all generated '
           'Jacoco coverage files.')
  parser.add_argument(
      '--disable-dalvik-asserts',
      dest='set_asserts', action='store_false', default=True,
      help='Removes the dalvik.vm.enableassertions property')
  parser.add_argument(
      '--proguard-mapping-path',
      help='.mapping file to use to deobfuscate java stack traces in test '
           'output and logcat.')
  parser.add_argument(
      '-E', '--exclude-annotation',
      dest='exclude_annotation_str',
      help='Comma-separated list of annotations. Exclude tests with these '
           'annotations.')
  parser.add_argument(
      '--enable-breakpad-dump',
      action='store_true',
      help='Stores any breakpad dumps till the end of the test.')
  parser.add_argument(
      '--replace-system-package',
      type=_RealPath,
      default=None,
      help='Use this apk to temporarily replace a system package with the '
           'same package name.')
  parser.add_argument(
      '--remove-system-package',
      default=[],
      action='append',
      dest='system_packages_to_remove',
      help='Specifies a system package to remove before testing if it exists '
           'on the system. WARNING: THIS WILL PERMANENTLY REMOVE THE SYSTEM '
           'APP. Unlike --replace-system-package, the app will not be '
           'restored after tests are finished.')
  parser.add_argument(
      '--use-voice-interaction-service',
      help='This can be used to update the voice interaction service to be a '
           'custom one. This is useful for mocking assistants, e.g. '
           'android.assist.service/.MainInteractionService')
  parser.add_argument(
      '--use-webview-provider',
      type=_RealPath, default=None,
      help='Use this apk as the webview provider during test. '
           'The original provider will be restored if possible; '
           "on Nougat the provider can't be determined and so "
           'the system will choose the default provider.')
  parser.add_argument(
      '--webview-command-line-arg',
      default=[],
      action='append',
      help="Specifies command line arguments to add to WebView's flag file")
  parser.add_argument(
      '--webview-process-mode',
      choices=['single', 'multiple'],
      help='Run WebView instrumentation tests only in the specified process '
           'mode. If not set, both single and multiple process modes will '
           'execute.')
  parser.add_argument(
      '--run-setup-command',
      default=[],
      action='append',
      dest='run_setup_commands',
      help='This can be used to run a custom shell command on the device as '
           'a setup step')
  parser.add_argument(
      '--run-teardown-command',
      default=[],
      action='append',
      dest='run_teardown_commands',
      help='This can be used to run a custom shell command on the device as '
           'a teardown step')
  parser.add_argument(
      '--runtime-deps-path',
      dest='runtime_deps_path', type=os.path.realpath,
      help='Runtime data dependency file from GN.')
  parser.add_argument(
      '--screenshot-directory',
      dest='screenshot_dir', type=os.path.realpath,
      help='Capture screenshots of test failures')
  parser.add_argument(
      '--store-tombstones',
      action='store_true', dest='store_tombstones',
      help='Add tombstones in results if crash.')
  parser.add_argument(
      '--strict-mode',
      dest='strict_mode', default='testing',
      help='StrictMode command-line flag set on the device: '
           'death/testing to kill the process, off to stop '
           'checking, flash to flash only. (default: %(default)s)')
  parser.add_argument(
      '--test-apk',
      required=True,
      help='Path or name of the apk containing the tests.')
  parser.add_argument(
      '--test-apk-as-instant',
      action='store_true',
      help='Install the test apk as an instant app. '
           'Instant apps run in a more restrictive execution environment.')
  parser.add_argument(
      '--test-launcher-batch-limit',
      dest='test_launcher_batch_limit',
      type=int,
      help=('Not actually used for instrumentation tests, but can be used as '
            'a proxy for determining if the current run is a retry without '
            'patch.'))
  parser.add_argument(
      '--is-unit-test',
      action='store_true',
      help=('Specify the test suite as composed of unit tests, blocking '
            'certain operations.'))
  parser.add_argument(
      '-w', '--wait-for-java-debugger', action='store_true',
      help='Wait for java debugger to attach before running any application '
           'code. Also disables test timeouts and sets retries=0.')

  # WPR record mode.
  parser.add_argument('--wpr-enable-record',
                      action='store_true',
                      default=False,
                      help='If true, the WPR server runs in record mode; '
                           'otherwise, it runs in replay mode.')

  parser.add_argument(
      '--approve-app-links',
      help='Force enables Digital Asset Link verification for the provided '
           'package and domain, example usage: --approve-app-links '
           'com.android.package:www.example.com')

  # These arguments are suppressed from the help text because they should
  # only ever be specified by an intermediate script.
  parser.add_argument(
      '--apk-under-test-incremental-install-json',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--test-apk-incremental-install-json',
      type=os.path.realpath,
      help=argparse.SUPPRESS)


def AddSkiaGoldTestOptions(parser):
  """Adds Skia Gold test options to |parser|."""
  parser = parser.add_argument_group("Skia Gold arguments")
  parser.add_argument(
      '--code-review-system',
      help='A non-default code review system to pass to Gold, if '
           'applicable')
  parser.add_argument(
      '--continuous-integration-system',
      help='A non-default continuous integration system to pass to Gold, if '
           'applicable')
  parser.add_argument(
      '--git-revision', help='The git commit currently being tested.')
  parser.add_argument(
      '--gerrit-issue',
      help='The Gerrit issue this test is being run on, if applicable.')
  parser.add_argument(
      '--gerrit-patchset',
      help='The Gerrit patchset this test is being run on, if applicable.')
  parser.add_argument(
      '--buildbucket-id',
      help='The Buildbucket build ID that this test was triggered from, if '
           'applicable.')
  local_group = parser.add_mutually_exclusive_group()
  local_group.add_argument(
      '--local-pixel-tests',
      action='store_true',
      default=None,
      help='Specifies to run the Skia Gold pixel tests in local mode. When '
           'run in local mode, uploading to Gold is disabled and traditional '
           'generated/golden/diff images are output instead of triage links. '
           'Running in local mode also implies --no-luci-auth. If both this '
           'and --no-local-pixel-tests are left unset, the test harness will '
           'attempt to detect whether it is running on a workstation or not '
           'and set the options accordingly.')
  local_group.add_argument(
      '--no-local-pixel-tests',
      action='store_false',
      dest='local_pixel_tests',
      help='Specifies to run the Skia Gold pixel tests in non-local (bot) '
           'mode. When run in this mode, data is actually uploaded to Gold '
           'and triage links are generated. If both this and '
           '--local-pixel-tests are left unset, the test harness will '
           'attempt to detect whether it is running on a workstation or not '
           'and set the options accordingly.')
  parser.add_argument(
      '--no-luci-auth',
      action='store_true',
      default=False,
      help="Don't use the service account provided by LUCI for "
           'authentication with Skia Gold, instead relying on gsutil to be '
           'pre-authenticated. Meant for testing locally instead of on the '
           'bots.')
  parser.add_argument(
      '--bypass-skia-gold-functionality',
      action='store_true',
      default=False,
      help='Bypass all interaction with Skia Gold, effectively disabling the '
           'image comparison portion of any tests that use Gold. Only meant '
           'to be used in case a Gold outage occurs and cannot be fixed '
           'quickly.')


def AddHostsideTestOptions(parser):
  """Adds hostside test options to |parser|."""

  parser = parser.add_argument_group('hostside arguments')

  parser.add_argument(
      '-s', '--test-suite', required=True,
      help='Hostside test suite to run.')
  parser.add_argument(
      '--test-apk-as-instant',
      action='store_true',
      help='Install the test apk as an instant app. '
           'Instant apps run in a more restrictive execution environment.')
  parser.add_argument(
      '--additional-apk',
      action='append',
      dest='additional_apks',
      default=[],
      type=_RealPath,
      help='Additional apk that must be installed on '
           'the device when the tests are run')
  parser.add_argument(
      '--use-webview-provider',
      type=_RealPath, default=None,
      help='Use this apk as the webview provider during test. '
           'The original provider will be restored if possible; '
           "on Nougat the provider can't be determined and so "
           'the system will choose the default provider.')
  parser.add_argument(
      '--tradefed-executable',
      type=_RealPath, default=None,
      help='Location of the cts-tradefed script')
  parser.add_argument(
      '--tradefed-aapt-path',
      type=_RealPath, default=None,
      help='Location of the directory containing the aapt binary')
  parser.add_argument(
      '--tradefed-adb-path',
      type=_RealPath, default=None,
      help='Location of the directory containing the adb binary')
  # The below arguments are not used, but allow us to pass the same arguments
  # from run_cts.py regardless of the type of run (instrumentation/hostside).
  parser.add_argument(
      '--apk-under-test',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--use-apk-under-test-flags-file',
      action='store_true',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '-E', '--exclude-annotation',
      dest='exclude_annotation_str',
      help=argparse.SUPPRESS)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  parser = parser.add_argument_group('junit arguments')

  parser.add_argument(
      '--coverage-on-the-fly',
      action='store_true',
      help='Generate coverage data by Jacoco on-the-fly instrumentation.')
  parser.add_argument(
      '--coverage-dir', type=os.path.realpath,
      help='Directory to store coverage info.')
  parser.add_argument(
      '--package-filter',
      help='Filters tests by package.')
  parser.add_argument(
      '--runner-filter',
      help='Filters tests by runner class. Must be fully qualified.')
  parser.add_argument('--json-config',
                      help='Runs only tests listed in this config.')
  parser.add_argument(
      '--shards',
      type=int,
      help='Number of shards to run junit tests in parallel on. Only 1 shard '
           'is supported when test-filter is specified. Values less than 1 '
           'will use auto select.')
  parser.add_argument('--shard-filter',
                      help='Comma-separated list of shard indices to run.')
  parser.add_argument(
      '-s', '--test-suite', required=True,
      help='JUnit test suite to run.')
  debug_group = parser.add_mutually_exclusive_group()
  debug_group.add_argument(
      '-w', '--wait-for-java-debugger', action='store_const', const='8701',
      dest='debug_socket', help='Alias for --debug-socket=8701')
  debug_group.add_argument(
      '--debug-socket',
      help='Wait for java debugger to attach at specified socket address '
           'before running any application code. Also disables test timeouts '
           'and sets retries=0.')

  # These arguments are for Android Robolectric tests.
  parser.add_argument(
      '--robolectric-runtime-deps-dir',
      help='Path to runtime deps for Robolectric.')
  parser.add_argument('--native-libs-dir',
                      help='Path to search for native libraries.')
  parser.add_argument(
      '--resource-apk',
      required=True,
      help='Path to .ap_ containing binary resources for Robolectric.')
  parser.add_argument('--shadows-allowlist',
                      help='Path to Allowlist file for Shadows.')


def AddLinkerTestOptions(parser):

  parser = parser.add_argument_group('linker arguments')

  parser.add_argument(
      '--test-apk',
      type=os.path.realpath,
      help='Path to the linker test APK.')


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  parser = parser.add_argument_group('monkey arguments')

  parser.add_argument('--browser',
                      required=True,
                      choices=list(constants.PACKAGE_INFO.keys()),
                      metavar='BROWSER',
                      help='Browser under test.')
  parser.add_argument(
      '--category',
      nargs='*', dest='categories', default=[],
      help='A list of allowed categories. Monkey will only visit activities '
           'that are listed with one of the specified categories.')
  parser.add_argument(
      '--event-count',
      default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  parser.add_argument(
      '--seed',
      type=int,
      help='Seed value for pseudo-random generator. The same seed value '
           'generates the same sequence of events. The seed is randomized '
           'by default.')
  parser.add_argument(
      '--throttle',
      default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')


def AddPythonTestOptions(parser):

  parser = parser.add_argument_group('python arguments')

  parser.add_argument('-s',
                      '--suite',
                      dest='suite_name',
                      metavar='SUITE_NAME',
                      choices=list(constants.PYTHON_UNIT_TEST_SUITES.keys()),
                      help='Name of the test suite to run.')


def _CreateClassToFileNameDict(test_apk):
  """Creates a dict mapping classes to file names from size-info apk."""
  constants.CheckOutputDirectory()
  test_apk_size_info = os.path.join(constants.GetOutDirectory(), 'size-info',
                                    os.path.basename(test_apk) + '.jar.info')

  class_to_file_dict = {}
  # Some tests such as webview_cts_tests use a separately downloaded apk to
  # run tests. This means the apk may not have been built by the system and
  # hence no size info file exists.
  if not os.path.exists(test_apk_size_info):
    logging.debug('Apk size file not found. %s', test_apk_size_info)
    return class_to_file_dict

  with open(test_apk_size_info, 'r') as f:
    for line in f:
      file_class, file_name = line.rstrip().split(',', 1)
      # Only want files that are not prebuilt.
      if file_name.startswith('../../'):
        class_to_file_dict[file_class] = str(
            file_name.replace('../../', '//', 1))

  return class_to_file_dict


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


_DEFAULT_PLATFORM_MODE_TESTS = [
    'gtest', 'hostside', 'instrumentation', 'junit', 'linker', 'monkey'
]


def RunTestsCommand(args, result_sink_client=None):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    result_sink_client: A ResultSinkClient object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)
  logging.info('command: %s', shlex.join(sys.argv))
  if args.enable_platform_mode or command in _DEFAULT_PLATFORM_MODE_TESTS:
    return RunTestsInPlatformMode(args, result_sink_client)

  if command == 'python':
    return _RunPythonTests(args)
  raise Exception('Unknown test type.')


def _SinkTestResult(test_result, test_file_name, result_sink_client):
  """Uploads a test result to result_sink.

  Args:
    test_result: A BaseTestResult object.
    test_file_name: A string representing the file location of the test.
    result_sink_client: A ResultSinkClient object.

  Returns:
    N/A
  """
  # Some tests put non utf-8 chars in their logs, which breaks uploads,
  # so decode and re-encode as needed.
  log_decoded = test_result.GetLog()
  if isinstance(log_decoded, bytes):
    log_decoded = log_decoded.decode('utf-8', 'replace')
  html_artifact = ''
  https_artifacts = []
  for link_name, link_url in sorted(test_result.GetLinks().items()):
    if link_url.startswith('https:'):
      https_artifacts.append('<li><a target="_blank" href=%s>%s</a></li>' %
                             (link_url, link_name))
    else:
      logging.info('Skipping non-https link %r (%s) for test %s.', link_name,
                   link_url, test_result.GetName())
  if https_artifacts:
    html_artifact += '<ul>%s</ul>' % '\n'.join(https_artifacts)
  result_sink_client.Post(test_result.GetNameForResultSink(),
                          test_result.GetType(),
                          test_result.GetDuration(),
                          log_decoded,
                          test_file_name,
                          variant=test_result.GetVariantForResultSink(),
                          failure_reason=test_result.GetFailureReason(),
                          html_artifact=html_artifact)


_SUPPORTED_IN_PLATFORM_MODE = [
    # TODO(jbudorick): Add support for more test types.
    'gtest',
    'hostside',
    'instrumentation',
    'junit',
    'linker',
    'monkey',
]


def UploadExceptions(result_sink_client, exc_recorder):
  if not result_sink_client or not exc_recorder.size():
    return

  try_count_max = 3
  for try_count in range(1, try_count_max + 1):
    logging.info('Uploading exception records to RDB. (TRY %d/%d)', try_count,
                 try_count_max)
    try:
      record_dict = exc_recorder.to_dict()
      result_sink_client.UpdateInvocationExtendedProperties(
          {exc_recorder.EXCEPTION_OCCURRENCES_KEY: record_dict})
      exc_recorder.clear()
      break
    except Exception as e:  # pylint: disable=W0703
      logging.error('Got error %s when uploading exception records.', e)
      # The upload can fail because the record size is too big.
      # In this case, try to reduce the size.
      if try_count == try_count_max - 2:
        # Clear all the stacktraces to reduce size.
        exc_recorder.clear_stacktrace()
      elif try_count == try_count_max - 1:
        # Clear all the records and just report the upload failure.
        exc_recorder.clear()
        exc_recorder.register(e)
      elif try_count == try_count_max:
        # Swallow the exception if the upload fails again and we hit the max
        # try count, so that it won't fail the test task (and it shouldn't).
        exc_recorder.clear()
        logging.error('Hit max retry. Skip uploading exception records.')


def RunTestsInPlatformMode(args, result_sink_client=None):

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  ### Set up sigterm handler.

  contexts_to_notify_on_sigterm = []

  def unexpected_sigterm(_signum, _frame):
    msg = [
        'Received SIGTERM. Shutting down.',
    ]
    for live_thread in threading.enumerate():
      # pylint: disable=protected-access
      thread_stack = ''.join(traceback.format_stack(
          sys._current_frames()[live_thread.ident]))
      msg.extend([
          'Thread "%s" (ident: %s) is currently running:' % (
              live_thread.name, live_thread.ident),
          thread_stack])

    for context in contexts_to_notify_on_sigterm:
      context.ReceivedSigterm()

    infra_error('\n'.join(msg))

  signal.signal(signal.SIGTERM, unexpected_sigterm)

  ### Set up results handling.
  # TODO(jbudorick): Rewrite results handling.

  # all_raw_results is a list of lists of base_test_result.TestRunResults
  # objects. Each instance of TestRunResults contains all test results
  # produced by a single try, while each list of TestRunResults contains all
  # tries in a single iteration.
  all_raw_results = []

  # all_iteration_results is a list of base_test_result.TestRunResults
  # objects. Each instance of TestRunResults contains the last test
  # result for each test run in that iteration.
  all_iteration_results = []

  global_results_tags = set()

  json_file = tempfile.NamedTemporaryFile(delete=False)
  json_file.close()

  @contextlib.contextmanager
  def json_finalizer():
    try:
      yield
    finally:
      if args.json_results_file and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.json_results_file)
      elif args.isolated_script_test_output and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.isolated_script_test_output)
      else:
        os.remove(json_file.name)

  @contextlib.contextmanager
  def json_writer():
    try:
      yield
    except Exception:
      global_results_tags.add('UNRELIABLE_RESULTS')
      raise
    finally:
      if args.isolated_script_test_output:
        interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
        json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                      interrupted,
                                                      json_file.name,
                                                      indent=2)
      else:
        json_results.GenerateJsonResultsFile(
            all_raw_results,
            json_file.name,
            global_tags=list(global_results_tags),
            indent=2)

      test_class_to_file_name_dict = {}
      # Test Location is only supported for instrumentation tests as it
      # requires the size-info file.
      if test_instance.TestType() == 'instrumentation':
        test_class_to_file_name_dict = _CreateClassToFileNameDict(args.test_apk)

      if result_sink_client:
        for run in all_raw_results:
          for results in run:
            for r in results.GetAll():
              # Matches chrome.page_info.PageInfoViewTest#testChromePage
              match = re.search(r'^(.+\..+)#', r.GetName())
              test_file_name = test_class_to_file_name_dict.get(
                  match.group(1)) if match else None
              _SinkTestResult(r, test_file_name, result_sink_client)

  @contextlib.contextmanager
  def upload_logcats_file():
    try:
      yield
    finally:
      if not args.logcat_output_file:
        logging.critical('Cannot upload logcat file: no file specified.')
      elif not os.path.exists(args.logcat_output_file):
        logging.critical("Cannot upload logcat file: file doesn't exist.")
      else:
        with open(args.logcat_output_file) as src:
          dst = logdog_helper.open_text('unified_logcats')
          if dst:
            shutil.copyfileobj(src, dst)
            dst.close()
            logging.critical(
                'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats'))

  logcats_uploader = contextlib_ext.Optional(
      upload_logcats_file(),
      'upload_logcats_file' in args and args.upload_logcats_file)

  save_detailed_results = (args.local_output or not local_utils.IsOnSwarming()
                           ) and not args.isolated_script_test_output

  @contextlib.contextmanager
  def exceptions_uploader():
    try:
      yield
    finally:
      UploadExceptions(result_sink_client, exception_recorder)

  ### Set up test objects.

  out_manager = output_manager_factory.CreateOutputManager(args)
  env = environment_factory.CreateEnvironment(
      args, out_manager, infra_error)
  test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
  test_run = test_run_factory.CreateTestRun(env, test_instance, infra_error)

  contexts_to_notify_on_sigterm.append(env)
  contexts_to_notify_on_sigterm.append(test_run)

  if args.list_tests:
    try:
      with out_manager, env, test_instance, test_run:
        test_names = test_run.GetTestsForListing()
      print('There are {} tests:'.format(len(test_names)))
      for n in test_names:
        print(n)
      return 0
    except NotImplementedError:
      sys.stderr.write('Test does not support --list-tests (type={}).\n'.format(
          args.command))
      return 1

  if getattr(args, 'list_data', False):
    with out_manager, env, test_instance, test_run:
      data_deps = test_run.GetDataDepsForListing()

    print('There are {} data files:'.format(len(data_deps)))
    for d in data_deps:
      print(d)
    return 0

  ### Run.
  with out_manager, json_finalizer():
    # |raw_logs_fh| is only used by Robolectric tests.
    raw_logs_fh = io.StringIO() if save_detailed_results else None

    with json_writer(), exceptions_uploader(), logcats_uploader, \
        env, test_instance, test_run:

      repetitions = (range(args.repeat + 1)
                     if args.repeat >= 0 else itertools.count())
      result_counts = collections.defaultdict(
          lambda: collections.defaultdict(int))
      iteration_count = 0
      for _ in repetitions:
        # raw_results will be populated with base_test_result.TestRunResults
        # by test_run.RunTests(). It is immediately added to all_raw_results
        # so that in the event of an exception, all_raw_results will already
        # have the up-to-date results and those can be written to disk.
        raw_results = []
        all_raw_results.append(raw_results)

        test_run.RunTests(raw_results, raw_logs_fh=raw_logs_fh)
        if not raw_results:
          all_raw_results.pop()
          continue

        iteration_results = base_test_result.TestRunResults()
        for r in reversed(raw_results):
          iteration_results.AddTestRunResults(r)
        all_iteration_results.append(iteration_results)
        iteration_count += 1

        for r in iteration_results.GetAll():
          result_counts[r.GetName()][r.GetType()] += 1

        report_results.LogFull(
            results=iteration_results,
            test_type=test_instance.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                     None))

        failed_tests = (iteration_results.GetNotPass() -
                        iteration_results.GetSkip())
        if failed_tests:
          _LogRerunStatement(failed_tests, args.wrapper_script_args)

        if args.break_on_failure and not iteration_results.DidRunPass():
          break

      if iteration_count > 1:
        # Display summary results, but only list a test if at least one of
        # its runs did not pass.
        all_pass = 0
        tot_tests = 0
        for test_name in result_counts:
          tot_tests += 1
          if any(result_counts[test_name][x] for x in (
              base_test_result.ResultType.FAIL,
              base_test_result.ResultType.CRASH,
              base_test_result.ResultType.TIMEOUT,
              base_test_result.ResultType.UNKNOWN)):
            logging.critical(
                '%s: %s',
                test_name,
                ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                          for i in base_test_result.ResultType.GetTypes()))
          else:
            all_pass += 1

        logging.critical('%s of %s tests passed in all %s runs',
                         str(all_pass),
                         str(tot_tests),
                         str(iteration_count))

    if save_detailed_results:
      assert raw_logs_fh
      raw_logs_fh.seek(0)
      raw_logs = raw_logs_fh.read()
      if raw_logs:
        with out_manager.ArchivedTempfile(
            'raw_logs.txt', 'raw_logs',
            output_manager.Datatype.TEXT) as raw_logs_file:
          raw_logs_file.write(raw_logs)
        logging.critical('RAW LOGS: %s', raw_logs_file.Link())

      with out_manager.ArchivedTempfile(
          'test_results_presentation.html',
          'test_results_presentation',
          output_manager.Datatype.HTML) as results_detail_file:
        result_html_string, _, _ = test_results_presentation.result_details(
            json_path=json_file.name,
            test_name=args.command,
            cs_base_url='http://cs.chromium.org',
            local_output=True)
        results_detail_file.write(result_html_string)
        results_detail_file.flush()
      logging.critical('TEST RESULTS: %s', results_detail_file.Link())

      ui_screenshots = test_results_presentation.ui_screenshot_set(
          json_file.name)
      if ui_screenshots:
        with out_manager.ArchivedTempfile(
            'ui_screenshots.json',
            'ui_capture',
            output_manager.Datatype.JSON) as ui_screenshot_file:
          ui_screenshot_file.write(ui_screenshots)
        logging.critical('UI Screenshots: %s', ui_screenshot_file.Link())

  return (0 if all(r.DidRunPass() for r in all_iteration_results)
          else constants.ERROR_EXIT_CODE)


def _LogRerunStatement(failed_tests, wrapper_arg_str):
  """Logs a message that can rerun the failed tests.

  Logs a copy/pasteable message that filters tests so just the failing tests
  are run.

  Args:
    failed_tests: A set of test results that did not pass.
    wrapper_arg_str: A string of args that were passed to the calling wrapper
        script.
  """
  rerun_arg_list = []
  try:
    constants.CheckOutputDirectory()
  # constants.CheckOutputDirectory throws bare exceptions.
  except:  # pylint: disable=bare-except
    logging.exception('Output directory not found. Unable to generate failing '
                      'test filter file.')
    return

  output_directory = constants.GetOutDirectory()
  if not os.path.exists(output_directory):
    logging.error('Output directory not found. Unable to generate failing '
                  'test filter file.')
    return

  test_filter_file = os.path.join(os.path.relpath(output_directory),
                                  _RERUN_FAILED_TESTS_FILE)
  arg_list = shlex.split(wrapper_arg_str) if wrapper_arg_str else sys.argv
  index = 0
  while index < len(arg_list):
    arg = arg_list[index]
    # Skip adding the filter=<file> and/or the filter arg as we're replacing
    # it with the new filter arg.
    # This covers --test-filter=, --test-launcher-filter-file=,
    # --gtest-filter=, --test-filter *Foobar.baz, -f *foobar,
    # --package-filter <package>, --runner-filter <runner>.
    if 'filter' in arg or arg == '-f':
      index += 1 if '=' in arg else 2
      continue

    rerun_arg_list.append(arg)
    index += 1

  failed_test_list = [str(t) for t in failed_tests]
  with open(test_filter_file, 'w') as fp:
    for t in failed_test_list:
      # Test result names can have a # in them that doesn't match when
      # applied as a test name filter.
      fp.write('%s\n' % t.replace('#', '.'))

  rerun_arg_list.append('--test-launcher-filter-file=%s' % test_filter_file)
  msg = """
    %d Test(s) failed.
    Rerun failed tests with copy and pastable command:
        %s
    """
  logging.critical(msg, len(failed_tests), shlex.join(rerun_arg_list))


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(
      title='test types', dest='command')

  subp = command_parsers.add_parser(
      'gtest',
      help='googletest-based C++ tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddGTestOptions(subp)
  AddTracingOptions(subp)
  AddCommandLineOptions(subp)

  subp = command_parsers.add_parser(
      'hostside',
      help='Webview CTS host-side tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddHostsideTestOptions(subp)

  subp = command_parsers.add_parser(
      'instrumentation',
      help='InstrumentationTestCase-based Java tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddInstrumentationTestOptions(subp)
  AddSkiaGoldTestOptions(subp)
  AddTracingOptions(subp)
  AddCommandLineOptions(subp)

  subp = command_parsers.add_parser(
      'junit',
      help='JUnit4-based Java tests')
  AddCommonOptions(subp)
  AddJUnitTestOptions(subp)

  subp = command_parsers.add_parser(
      'linker',
      help='linker tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddLinkerTestOptions(subp)

  subp = command_parsers.add_parser(
      'monkey',
      help="tests based on Android's monkey command")
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddMonkeyTestOptions(subp)

  subp = command_parsers.add_parser(
      'python',
      help='python tests based on unittest.TestCase')
  AddCommonOptions(subp)
  AddPythonTestOptions(subp)

  args, unknown_args = parser.parse_known_args()

  if unknown_args:
    if getattr(args, 'allow_unknown', None):
      args.command_line_flags = unknown_args
    else:
      parser.error('unrecognized arguments: %s' % ' '.join(unknown_args))

  # --enable-concurrent-adb does not handle device reboots gracefully.
  if getattr(args, 'enable_concurrent_adb', None):
    if getattr(args, 'replace_system_package', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --replace-system-package')
      args.enable_concurrent_adb = False
    elif getattr(args, 'system_packages_to_remove', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --remove-system-package')
      args.enable_concurrent_adb = False
    elif getattr(args, 'use_webview_provider', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --use-webview-provider')
      args.enable_concurrent_adb = False

  if (getattr(args, 'coverage_on_the_fly', False)
      and not getattr(args, 'coverage_dir', '')):
    parser.error('--coverage-on-the-fly requires --coverage-dir')

  if (getattr(args, 'debug_socket', None)
      or getattr(args, 'wait_for_java_debugger', None)):
    args.num_retries = 0

  # Result-sink may not exist in the environment if rdb stream is not enabled.
  result_sink_client = result_sink.TryInitClient()

  try:
    return RunTestsCommand(args, result_sink_client)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except Exception:  # pylint: disable=W0703
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  exit_code = main()
  if exit_code == constants.INFRA_EXIT_CODE:
    # This exit code is returned in case of missing, unreachable,
    # or otherwise not fit for purpose test devices.
    # When this happens, the graceful cleanup triggered by sys.exit()
    # hangs indefinitely (on swarming - until it hits the 20min timeout).
    # Skip cleanup (other than flushing output streams) and exit forcefully
    # to avoid the hang.
    sys.stdout.flush()
    sys.stderr.flush()
    os._exit(exit_code)  # pylint: disable=protected-access
  else:
    sys.exit(exit_code)
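
# A minimal example invocation, as a sketch only: the suite name and output
# directory below are placeholders and depend on the local checkout and build
# configuration. The `gtest` subcommand and the `-s/--suite`,
# `--output-directory`, and `-v` flags correspond to the parsers defined above.
#
#   vpython3 build/android/test_runner.py gtest \
#       -s base_unittests --output-directory out/Debug -v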