1#!/usr/bin/python3 2# 3# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 4# Use of this source code is governed by a BSD-style license that can be 5# found in the LICENSE file. 6 7 8"""Unit tests for server/cros/dynamic_suite/dynamic_suite.py.""" 9 10from __future__ import absolute_import 11from __future__ import division 12from __future__ import print_function 13 14import collections 15from collections import OrderedDict 16import os 17import six 18from six.moves import range 19from six.moves import zip 20import shutil 21import tempfile 22import unittest 23from unittest.mock import patch, call, ANY 24 25import common 26 27from autotest_lib.client.common_lib import base_job 28from autotest_lib.client.common_lib import control_data 29from autotest_lib.client.common_lib import error 30from autotest_lib.client.common_lib import priorities 31from autotest_lib.client.common_lib import utils 32from autotest_lib.client.common_lib.cros import dev_server 33from autotest_lib.server import frontend 34from autotest_lib.server.cros import provision 35from autotest_lib.server.cros.dynamic_suite import control_file_getter 36from autotest_lib.server.cros.dynamic_suite import constants 37from autotest_lib.server.cros.dynamic_suite import job_status 38from autotest_lib.server.cros.dynamic_suite import suite as SuiteBase 39from autotest_lib.server.cros.dynamic_suite import suite_common 40from autotest_lib.server.cros.dynamic_suite.comparators import StatusContains 41from autotest_lib.server.cros.dynamic_suite.fakes import FakeControlData 42from autotest_lib.server.cros.dynamic_suite.fakes import FakeJob 43from autotest_lib.server.cros.dynamic_suite.fakes import FakeMultiprocessingPool 44from autotest_lib.server.cros.dynamic_suite.suite import RetryHandler 45from autotest_lib.server.cros.dynamic_suite.suite import Suite 46 47from unittest.mock import MagicMock 48 49 50class TypeMatcher(object): 51 """Matcher for object is of type.""" 52 53 def __init__(self, expected_type): 54 self.expected_type = expected_type 55 56 def __eq__(self, other): 57 return isinstance(other, self.expected_type) 58 59 60class SuiteTest(unittest.TestCase): 61 """Unit tests for dynamic_suite Suite class. 62 63 @var _BUILDS: fake build 64 @var _TAG: fake suite tag 65 """ 66 67 _BOARD = 'board:board' 68 _BUILDS = {provision.CROS_VERSION_PREFIX:'build_1', 69 provision.FW_RW_VERSION_PREFIX:'fwrw_build_1'} 70 _TAG = 'au' 71 _ATTR = {'attr:attr'} 72 _DEVSERVER_HOST = 'http://dontcare:8080' 73 _FAKE_JOB_ID = 10 74 75 def setUp(self): 76 """Setup.""" 77 super(SuiteTest, self).setUp() 78 self.maxDiff = None 79 self.use_batch = suite_common.ENABLE_CONTROLS_IN_BATCH 80 suite_common.ENABLE_CONTROLS_IN_BATCH = False 81 afe_patcher = patch.object(frontend, 'AFE') 82 self.afe = afe_patcher.start() 83 self.addCleanup(afe_patcher.stop) 84 tko_patcher = patch.object(frontend, 'TKO') 85 self.tko = tko_patcher.start() 86 self.addCleanup(tko_patcher.stop) 87 88 self.tmpdir = tempfile.mkdtemp(suffix=type(self).__name__) 89 90 getter_patch = patch.object(control_file_getter, 'ControlFileGetter') 91 self.getter = getter_patch.start() 92 self.addCleanup(getter_patch.stop) 93 94 self.devserver = dev_server.ImageServer(self._DEVSERVER_HOST) 95 96 self.files = OrderedDict( 97 [('one', FakeControlData(self._TAG, self._ATTR, 'data_one', 98 'FAST', job_retries=None)), 99 ('two', FakeControlData(self._TAG, self._ATTR, 'data_two', 100 'SHORT', dependencies=['feta'])), 101 ('three', FakeControlData(self._TAG, self._ATTR, 'data_three', 102 'MEDIUM')), 103 ('four', FakeControlData('other', self._ATTR, 'data_four', 104 'LONG', dependencies=['arugula'])), 105 ('five', FakeControlData(self._TAG, {'other'}, 'data_five', 106 'LONG', dependencies=['arugula', 107 'caligula'])), 108 ('six', FakeControlData(self._TAG, self._ATTR, 'data_six', 109 'LENGTHY')), 110 ('seven', FakeControlData(self._TAG, self._ATTR, 'data_seven', 111 'FAST', job_retries=1))]) 112 113 self.files_to_filter = { 114 'with/deps/...': FakeControlData(self._TAG, self._ATTR, 115 'gets filtered'), 116 'with/profilers/...': FakeControlData(self._TAG, self._ATTR, 117 'gets filtered')} 118 119 120 def additional_mocking(self): 121 patcher = patch.object(control_data, 'parse_control_string') 122 self.cf_getter_string = patcher.start() 123 self.addCleanup(patcher.stop) 124 125 def tearDown(self): 126 """Teardown.""" 127 suite_common.ENABLE_CONTROLS_IN_BATCH = self.use_batch 128 super(SuiteTest, self).tearDown() 129 shutil.rmtree(self.tmpdir, ignore_errors=True) 130 131 132 def expect_control_file_parsing(self, suite_name=_TAG): 133 """Expect an attempt to parse the 'control files' in |self.files|. 134 135 @param suite_name: The suite name to parse control files for. 136 """ 137 all_files = list(self.files.keys()) + list(self.files_to_filter.keys()) 138 self._set_control_file_parsing_expectations(False, all_files, 139 self.files, suite_name) 140 141 142 def _set_control_file_parsing_expectations(self, already_stubbed, 143 file_list, files_to_parse, 144 suite_name): 145 """Expect an attempt to parse the 'control files' in |files|. 146 147 @param already_stubbed: parse_control_string already stubbed out. 148 @param file_list: the files the dev server returns 149 @param files_to_parse: the {'name': FakeControlData} dict of files we 150 expect to get parsed. 151 """ 152 if not already_stubbed: 153 self.additional_mocking() 154 patcher = patch.object(suite_common.multiprocessing, 'Pool') 155 self.mp_pool = patcher.start() 156 self.addCleanup(patcher.stop) 157 self.mp_pool.return_value = FakeMultiprocessingPool() 158 159 self.getter.get_control_file_list.return_value = file_list 160 get_control_file_contents_mock_list = [] 161 parse_mock_list = [] 162 for file, data in six.iteritems(files_to_parse): 163 get_control_file_contents_mock_list.append(data.string) 164 parse_mock_list.append(data) 165 self.getter.get_control_file_contents.side_effect = get_control_file_contents_mock_list 166 self.cf_getter_string.side_effect = parse_mock_list 167 168 169 def expect_control_file_parsing_in_batch(self, suite_name=_TAG): 170 """Expect an attempt to parse the contents of all control files in 171 |self.files| and |self.files_to_filter|, form them to a dict. 172 173 @param suite_name: The suite name to parse control files for. 174 """ 175 DevServerGetter_patch = patch.object(control_file_getter, 176 'DevServerGetter') 177 self.getter = DevServerGetter_patch.start() 178 self.addCleanup(DevServerGetter_patch.stop) 179 180 patcher = patch.object(suite_common.multiprocessing, 'Pool') 181 mp_pool = patcher.start() 182 self.addCleanup(patcher.stop) 183 mp_pool.return_value = FakeMultiprocessingPool() 184 185 suite_info = {} 186 call_list = [] 187 expected_calls = [] 188 for k, v in six.iteritems(self.files): 189 suite_info[k] = v.string 190 call_list.append(v) 191 expected_calls.append(call(v.string, raise_warnings=True, path=k)) 192 self.cf_getter_string.side_effect = (call_list) 193 for k, v in six.iteritems(self.files_to_filter): 194 suite_info[k] = v.string 195 self.getter._dev_server = self._DEVSERVER_HOST 196 self.getter.get_suite_info.return_value = suite_info 197 return expected_calls 198 199 def testFindAllTestInBatch(self): 200 """Test switch on enable_getting_controls_in_batch for function 201 find_all_test.""" 202 self.additional_mocking() 203 self.use_batch = suite_common.ENABLE_CONTROLS_IN_BATCH 204 expected_calls = self.expect_control_file_parsing_in_batch() 205 suite_common.ENABLE_CONTROLS_IN_BATCH = True 206 207 with patch.object(suite_common, '_should_batch_with') as sbw_mock: 208 sbw_mock.return_value = True 209 210 predicate = lambda d: d.suite == self._TAG 211 tests = SuiteBase.find_and_parse_tests(self.getter, predicate, 212 self._TAG) 213 self.assertEquals(len(tests), 6) 214 self.assertTrue(self.files['one'] in tests) 215 self.assertTrue(self.files['two'] in tests) 216 self.assertTrue(self.files['three'] in tests) 217 self.assertTrue(self.files['five'] in tests) 218 self.assertTrue(self.files['six'] in tests) 219 self.assertTrue(self.files['seven'] in tests) 220 suite_common.ENABLE_CONTROLS_IN_BATCH = self.use_batch 221 222 self.cf_getter_string.assert_has_calls(expected_calls, any_order=True) 223 224 def testFindAndParseStableTests(self): 225 """Should find only tests that match a predicate.""" 226 self.additional_mocking() 227 228 self.expect_control_file_parsing() 229 230 predicate = lambda d: d.text == self.files['two'].string 231 tests = SuiteBase.find_and_parse_tests(self.getter, 232 predicate, 233 self._TAG) 234 self.assertEquals(len(tests), 1) 235 self.assertEquals(tests[0], self.files['two']) 236 237 238 def testFindSuiteSyntaxErrors(self): 239 """Check all control files for syntax errors. 240 241 This test actually parses all control files in the autotest directory 242 for syntax errors, by using the un-forgiving parser and pretending to 243 look for all control files with the suite attribute. 244 """ 245 246 autodir = os.path.abspath( 247 os.path.join(os.path.dirname(__file__), '..', '..', '..')) 248 fs_getter = SuiteBase.create_fs_getter(autodir) 249 predicate = lambda t: hasattr(t, 'suite') 250 SuiteBase.find_and_parse_tests(fs_getter, predicate, 251 forgiving_parser=False) 252 253 254 def testFindAndParseTestsSuite(self): 255 """Should find all tests that match a predicate.""" 256 self.additional_mocking() 257 self.expect_control_file_parsing() 258 259 predicate = lambda d: d.suite == self._TAG 260 tests = SuiteBase.find_and_parse_tests(self.getter, 261 predicate, 262 self._TAG) 263 self.assertEquals(len(tests), 6) 264 self.assertTrue(self.files['one'] in tests) 265 self.assertTrue(self.files['two'] in tests) 266 self.assertTrue(self.files['three'] in tests) 267 self.assertTrue(self.files['five'] in tests) 268 self.assertTrue(self.files['six'] in tests) 269 self.assertTrue(self.files['seven'] in tests) 270 271 272 def testFindAndParseTestsAttr(self): 273 """Should find all tests that match a predicate.""" 274 self.additional_mocking() 275 self.expect_control_file_parsing() 276 277 predicate = SuiteBase.matches_attribute_expression_predicate('attr:attr') 278 tests = SuiteBase.find_and_parse_tests(self.getter, 279 predicate, 280 self._TAG) 281 self.assertEquals(len(tests), 6) 282 self.assertTrue(self.files['one'] in tests) 283 self.assertTrue(self.files['two'] in tests) 284 self.assertTrue(self.files['three'] in tests) 285 self.assertTrue(self.files['four'] in tests) 286 self.assertTrue(self.files['six'] in tests) 287 self.assertTrue(self.files['seven'] in tests) 288 289 290 def testAdHocSuiteCreation(self): 291 """Should be able to schedule an ad-hoc suite by specifying 292 a single test name.""" 293 self.additional_mocking() 294 295 self.expect_control_file_parsing(suite_name='ad_hoc_suite') 296 predicate = SuiteBase.test_name_equals_predicate('name-data_five') 297 suite = Suite.create_from_predicates([predicate], self._BUILDS, 298 self._BOARD, devserver=None, 299 cf_getter=self.getter, 300 afe=self.afe, tko=self.tko) 301 302 self.assertFalse(self.files['one'] in suite.tests) 303 self.assertFalse(self.files['two'] in suite.tests) 304 self.assertFalse(self.files['four'] in suite.tests) 305 self.assertTrue(self.files['five'] in suite.tests) 306 307 308 def mock_control_file_parsing(self): 309 """Fake out find_and_parse_tests(), returning content from |self.files|. 310 """ 311 for test in self.files.values(): 312 test.text = test.string # mimic parsing. 313 parse_patch = patch.object(SuiteBase, 'find_and_parse_tests') 314 self.parse_mock = parse_patch.start() 315 self.addCleanup(parse_patch.stop) 316 self.parse_mock.return_value = self.files.values() 317 318 def expect_job_scheduling(self, recorder, 319 tests_to_skip=[], ignore_deps=False, 320 raises=False, suite_deps=[], suite=None, 321 extra_keyvals={}): 322 """Expect jobs to be scheduled for 'tests' in |self.files|. 323 324 @param recorder: object with a record_entry to be used to record test 325 results. 326 @param tests_to_skip: [list, of, test, names] that we expect to skip. 327 @param ignore_deps: If true, ignore tests' dependencies. 328 @param raises: If True, expect exceptions. 329 @param suite_deps: If True, add suite level dependencies. 330 @param extra_keyvals: Extra keyvals set to tests. 331 """ 332 record_job_id = suite and suite._results_dir 333 if record_job_id: 334 p = patch.object(suite, '_remember_job_keyval') 335 p.start() 336 self.addCleanup(p.stop) 337 recorder.record_entry( 338 StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG), 339 log_in_subdir=False) 340 tests = list(self.files.values()) 341 n = 1 342 for test in tests: 343 if test.name in tests_to_skip: 344 continue 345 dependencies = [] 346 if not ignore_deps: 347 dependencies.extend(test.dependencies) 348 if suite_deps: 349 dependencies.extend(suite_deps) 350 dependencies.append(self._BOARD) 351 build = self._BUILDS[provision.CROS_VERSION_PREFIX] 352 keyvals = { 353 'build': build, 354 'suite': self._TAG, 355 'builds': SuiteTest._BUILDS, 356 'experimental':test.experimental, 357 } 358 keyvals.update(extra_keyvals) 359 360 job_mock = self.afe.create_job( 361 control_file=test.text, 362 name=ANY, 363 control_type=ANY, 364 meta_hosts=[self._BOARD], 365 dependencies=dependencies, 366 keyvals=keyvals, 367 max_runtime_mins=24 * 60, 368 timeout_mins=1440, 369 parent_job_id=None, 370 reboot_before=ANY, 371 run_reset=ANY, 372 priority=priorities.Priority.DEFAULT, 373 synch_count=test.sync_count, 374 require_ssp=test.require_ssp) 375 if raises: 376 job_mock.side_effect = error.NoEligibleHostException() 377 recorder.record_entry( 378 StatusContains.CreateFromStrings('START', test.name), 379 log_in_subdir=False) 380 recorder.record_entry( 381 StatusContains.CreateFromStrings('TEST_NA', test.name), 382 log_in_subdir=False) 383 recorder.record_entry( 384 StatusContains.CreateFromStrings('END', test.name), 385 log_in_subdir=False) 386 else: 387 fake_job = FakeJob(id=n) 388 job_mock.return_value = fake_job 389 if record_job_id: 390 suite._remember_job_keyval(fake_job) 391 n += 1 392 393 394 def testScheduleTestsAndRecord(self): 395 """Should schedule stable and experimental tests with the AFE.""" 396 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 397 'name-data_four', 'name-data_five', 'name-data_six', 398 'name-data_seven'] 399 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: 7, 400 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 401 402 self.mock_control_file_parsing() 403 404 suite = Suite.create_from_name(self._TAG, self._BUILDS, self._BOARD, 405 self.devserver, 406 afe=self.afe, tko=self.tko, 407 results_dir=self.tmpdir) 408 recorder = MagicMock(base_job.base_job) 409 self.expect_job_scheduling(recorder, suite=suite) 410 with patch.object(utils, 'write_keyval'): 411 utils.write_keyval(self.tmpdir, keyval_dict) 412 suite.schedule(recorder.record_entry) 413 for job in suite._jobs: 414 self.assertTrue(hasattr(job, 'test_name')) 415 416 417 def testScheduleTests(self): 418 """Should schedule tests with the AFE.""" 419 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 420 'name-data_four', 'name-data_five', 'name-data_six', 421 'name-data_seven'] 422 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: len(name_list), 423 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 424 425 self.mock_control_file_parsing() 426 recorder = MagicMock(base_job.base_job) 427 self.expect_job_scheduling(recorder) 428 with patch.object(utils, 'write_keyval') as utils_patch: 429 utils_patch.write_keyval(None, keyval_dict) 430 431 suite = Suite.create_from_name(self._TAG, 432 self._BUILDS, 433 self._BOARD, 434 self.devserver, 435 afe=self.afe, 436 tko=self.tko) 437 suite.schedule(recorder.record_entry) 438 439 440 def testScheduleTestsIgnoreDeps(self): 441 """Test scheduling tests ignoring deps.""" 442 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 443 'name-data_four', 'name-data_five', 'name-data_six', 444 'name-data_seven'] 445 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: len(name_list), 446 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 447 448 self.mock_control_file_parsing() 449 recorder = MagicMock(base_job.base_job) 450 self.expect_job_scheduling(recorder, ignore_deps=True) 451 with patch.object(utils, 'write_keyval') as utils_patch: 452 utils_patch.write_keyval(None, keyval_dict) 453 454 suite = Suite.create_from_name(self._TAG, 455 self._BUILDS, 456 self._BOARD, 457 self.devserver, 458 afe=self.afe, 459 tko=self.tko, 460 ignore_deps=True) 461 suite.schedule(recorder.record_entry) 462 463 464 def testScheduleUnrunnableTestsTESTNA(self): 465 """Tests which fail to schedule should be TEST_NA.""" 466 # Since all tests will be fail to schedule, the num of scheduled tests 467 # will be zero. 468 name_list = [] 469 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: 0, 470 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 471 472 self.mock_control_file_parsing() 473 recorder = MagicMock(base_job.base_job) 474 self.expect_job_scheduling(recorder, raises=True) 475 with patch.object(utils, 'write_keyval') as utils_patch: 476 utils_patch.write_keyval(None, keyval_dict) 477 suite = Suite.create_from_name(self._TAG, 478 self._BUILDS, 479 self._BOARD, 480 self.devserver, 481 afe=self.afe, 482 tko=self.tko) 483 suite.schedule(recorder.record_entry) 484 485 486 def testRetryMapAfterScheduling(self): 487 """Test job-test and test-job mapping are correctly updated.""" 488 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 489 'name-data_four', 'name-data_five', 'name-data_six', 490 'name-data_seven'] 491 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: 7, 492 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 493 494 self.mock_control_file_parsing() 495 recorder = MagicMock(base_job.base_job) 496 self.expect_job_scheduling(recorder) 497 with patch.object(utils, 'write_keyval') as utils_patch: 498 utils_patch.write_keyval(None, keyval_dict) 499 all_files = list(self.files.items()) 500 # Sort tests in self.files so that they are in the same 501 # order as they are scheduled. 502 expected_retry_map = {} 503 for n in range(len(all_files)): 504 test = all_files[n][1] 505 job_id = n + 1 506 job_retries = 1 if test.job_retries is None else test.job_retries 507 if job_retries > 0: 508 expected_retry_map[job_id] = { 509 'state': RetryHandler.States.NOT_ATTEMPTED, 510 'retry_max': job_retries 511 } 512 513 suite = Suite.create_from_name(self._TAG, 514 self._BUILDS, 515 self._BOARD, 516 self.devserver, 517 afe=self.afe, 518 tko=self.tko, 519 job_retry=True) 520 suite.schedule(recorder.record_entry) 521 522 523 def testSuiteMaxRetries(self): 524 """Test suite max retries.""" 525 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 526 'name-data_four', 'name-data_five', 527 'name-data_six', 'name-data_seven'] 528 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: 7, 529 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 530 531 self.mock_control_file_parsing() 532 recorder = MagicMock(base_job.base_job) 533 self.expect_job_scheduling(recorder) 534 with patch.object(utils, 'write_keyval') as utils_patch: 535 utils_patch.write_keyval(None, keyval_dict) 536 suite = Suite.create_from_name(self._TAG, 537 self._BUILDS, 538 self._BOARD, 539 self.devserver, 540 afe=self.afe, 541 tko=self.tko, 542 job_retry=True, 543 max_retries=1) 544 suite.schedule(recorder.record_entry) 545 self.assertEqual(suite._retry_handler._max_retries, 1) 546 # Find the job_id of the test that allows retry 547 job_id = next(six.iterkeys(suite._retry_handler._retry_map)) 548 suite._retry_handler.add_retry(old_job_id=job_id, new_job_id=10) 549 self.assertEqual(suite._retry_handler._max_retries, 0) 550 551 552 def testSuiteDependencies(self): 553 """Should add suite dependencies to tests scheduled.""" 554 name_list = ['name-data_one', 'name-data_two', 'name-data_three', 555 'name-data_four', 'name-data_five', 'name-data_six', 556 'name-data_seven'] 557 keyval_dict = {constants.SCHEDULED_TEST_COUNT_KEY: len(name_list), 558 constants.SCHEDULED_TEST_NAMES_KEY: repr(name_list)} 559 560 self.mock_control_file_parsing() 561 recorder = MagicMock(base_job.base_job) 562 self.expect_job_scheduling(recorder, suite_deps=['extra']) 563 with patch.object(utils, 'write_keyval') as utils_patch: 564 utils_patch.write_keyval(None, keyval_dict) 565 566 suite = Suite.create_from_name(self._TAG, 567 self._BUILDS, 568 self._BOARD, 569 self.devserver, 570 extra_deps=['extra'], 571 afe=self.afe, 572 tko=self.tko) 573 suite.schedule(recorder.record_entry) 574 575 576 def testInheritedKeyvals(self): 577 """Tests should inherit some allowlisted job keyvals.""" 578 # Only keyvals in constants.INHERITED_KEYVALS are inherited to tests. 579 job_keyvals = { 580 constants.KEYVAL_CIDB_BUILD_ID: '111', 581 constants.KEYVAL_CIDB_BUILD_STAGE_ID: '222', 582 constants.KEYVAL_BRANCH: 'placeholder_branch', 583 constants.KEYVAL_BUILDER_NAME: 'model-placeholder', 584 constants.KEYVAL_MAIN_BUILDER_NAME: 'main-placeholder', 585 'your': 'name', 586 } 587 test_keyvals = { 588 constants.KEYVAL_CIDB_BUILD_ID: '111', 589 constants.KEYVAL_CIDB_BUILD_STAGE_ID: '222', 590 constants.KEYVAL_BRANCH: 'placeholder_branch', 591 constants.KEYVAL_BUILDER_NAME: 'model-placeholder', 592 constants.KEYVAL_MAIN_BUILDER_NAME: 'main-placeholder', 593 } 594 595 self.mock_control_file_parsing() 596 recorder = MagicMock(base_job.base_job) 597 self.expect_job_scheduling( 598 recorder, 599 extra_keyvals=test_keyvals) 600 with patch.object(utils, 'write_keyval') as utils_patch: 601 utils_patch.write_keyval(None, job_keyvals) 602 utils_patch.write_keyval(None, ANY) 603 604 suite = Suite.create_from_name(self._TAG, self._BUILDS, self._BOARD, 605 self.devserver, 606 afe=self.afe, tko=self.tko, 607 job_keyvals=job_keyvals) 608 suite.schedule(recorder.record_entry) 609 610 611 def _createSuiteWithMockedTestsAndControlFiles(self, file_bugs=False): 612 """Create a Suite, using mocked tests and control file contents. 613 614 @return Suite object, after mocking out behavior needed to create it. 615 """ 616 self.result_reporter = _MemoryResultReporter() 617 self.expect_control_file_parsing() 618 suite = Suite.create_from_name( 619 self._TAG, 620 self._BUILDS, 621 self._BOARD, 622 self.devserver, 623 self.getter, 624 afe=self.afe, 625 tko=self.tko, 626 file_bugs=file_bugs, 627 job_retry=True, 628 result_reporter=self.result_reporter, 629 ) 630 return suite 631 632 633 def _createSuiteMockResults(self, results_dir=None, result_status='FAIL'): 634 """Create a suite, returned a set of mocked results to expect. 635 636 @param results_dir: A mock results directory. 637 @param result_status: A desired result status, e.g. 'FAIL', 'WARN'. 638 639 @return List of mocked results to wait on. 640 """ 641 self.suite = self._createSuiteWithMockedTestsAndControlFiles( 642 file_bugs=True) 643 self.suite._results_dir = results_dir 644 test_report = self._get_bad_test_report(result_status) 645 test_predicates = test_report.predicates 646 test_fallout = test_report.fallout 647 648 self.recorder = MagicMock(base_job.base_job) 649 self.recorder.record_entry = MagicMock(base_job.base_job.record_entry) 650 self._mock_recorder_with_results([test_predicates], self.recorder) 651 return [test_predicates, test_fallout] 652 653 654 def _mock_recorder_with_results(self, results, recorder): 655 """ 656 Checks that results are recoded in order, eg: 657 START, (status, name, reason) END 658 659 @param results: list of results 660 @param recorder: status recorder 661 """ 662 for result in results: 663 status = result[0] 664 test_name = result[1] 665 recorder.record_entry( 666 StatusContains.CreateFromStrings('START', test_name), 667 log_in_subdir=False) 668 recorder.record_entry( 669 StatusContains.CreateFromStrings(*result), 670 log_in_subdir=False).InAnyOrder('results') 671 recorder.record_entry( 672 StatusContains.CreateFromStrings('END %s' % status, test_name), 673 log_in_subdir=False) 674 675 676 def schedule_and_expect_these_results(self, suite, results, recorder): 677 """Create stubs for call to suite.schedule and 678 job_status.wait_for_results 679 680 @param suite: suite object for which to stub out schedule(...) 681 @param results: results object to be returned from 682 job_stats_wait_for_results(...) 683 @param recorder: mocked recorder object to replay status messages 684 """ 685 def result_generator(results): 686 """A simple generator which generates results as Status objects. 687 688 This generator handles 'send' by simply ignoring it. 689 690 @param results: results object to be returned from 691 job_stats_wait_for_results(...) 692 @yield: job_status.Status objects. 693 """ 694 results = [job_status.Status(*r) for r in results] 695 for r in results: 696 new_input = (yield r) 697 if new_input: 698 yield None 699 700 suite_schedule_patch = patch.object(suite, 'schedule') 701 self.suite_schedule_mock = suite_schedule_patch.start() 702 self.addCleanup(suite_schedule_patch.stop) 703 self.suite_schedule_mock(recorder.record_entry) 704 suite._retry_handler = RetryHandler({}) 705 706 waiter_patch = patch.object(job_status.JobResultWaiter, 707 'wait_for_results', 708 autospec=True) 709 waiter_mock = waiter_patch.start() 710 waiter_mock.return_value = result_generator(results) 711 self.addCleanup(waiter_patch.stop) 712 713 @patch('autotest_lib.client.common_lib.base_job.base_job', 714 autospec=base_job.base_job) 715 def testRunAndWaitSuccess(self, recorder): 716 """Should record successful results.""" 717 suite = self._createSuiteWithMockedTestsAndControlFiles() 718 719 results = [('GOOD', 'good'), ('FAIL', 'bad', 'reason')] 720 self._mock_recorder_with_results(results, recorder) 721 self.schedule_and_expect_these_results(suite, results, recorder) 722 suite.schedule(recorder.record_entry) 723 suite.wait(recorder.record_entry) 724 725 @patch('autotest_lib.client.common_lib.base_job.base_job', 726 autospec=base_job.base_job) 727 def testRunAndWaitFailure(self, recorder): 728 """Should record failure to gather results.""" 729 suite = self._createSuiteWithMockedTestsAndControlFiles() 730 731 recorder.record_entry( 732 StatusContains.CreateFromStrings('FAIL', self._TAG, 'waiting'), 733 log_in_subdir=False) 734 735 with patch.object(suite, 'schedule') as ss: 736 ss.schedule(recorder.record_entry) 737 738 with patch.object(job_status.JobResultWaiter, 739 'wait_for_results', 740 autospec=True) as wait_mock: 741 wait_mock.side_effect = Exception 742 suite.schedule(recorder.record_entry) 743 suite.wait(recorder.record_entry) 744 745 @patch('autotest_lib.client.common_lib.base_job.base_job', 746 autospec=base_job.base_job) 747 def testRunAndWaitScheduleFailure(self, recorder): 748 """Should record failure to schedule jobs.""" 749 self.additional_mocking() 750 suite = self._createSuiteWithMockedTestsAndControlFiles() 751 recorder.record_entry( 752 StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG), 753 log_in_subdir=False) 754 755 recorder.record_entry( 756 StatusContains.CreateFromStrings('FAIL', self._TAG, 'scheduling'), 757 log_in_subdir=False) 758 759 local_patcher = patch.object(suite._job_creator, 'create_job') 760 patcher = local_patcher.start() 761 self.addCleanup(local_patcher.stop) 762 patcher.side_effect = (Exception('Expected during test.')) 763 764 suite.schedule(recorder.record_entry) 765 suite.wait(recorder.record_entry) 766 patcher.assert_called_with(ANY, retry_for=ANY) 767 768 769 def testGetTestsSortedByTime(self): 770 """Should find all tests and sorted by TIME setting.""" 771 self.additional_mocking() 772 self.expect_control_file_parsing() 773 # Get all tests. 774 tests = SuiteBase.find_and_parse_tests(self.getter, 775 lambda d: True, 776 self._TAG) 777 self.assertEquals(len(tests), 7) 778 times = [control_data.ControlData.get_test_time_index(test.time) 779 for test in tests] 780 self.assertTrue(all(x>=y for x, y in zip(times, times[1:])), 781 'Tests are not ordered correctly.') 782 783 784 def _get_bad_test_report(self, result_status='FAIL'): 785 """ 786 Fetch the predicates of a failing test, and the parameters 787 that are a fallout of this test failing. 788 """ 789 predicates = collections.namedtuple('predicates', 790 'status, testname, reason') 791 fallout = collections.namedtuple('fallout', 792 ('time_start, time_end, job_id,' 793 'username, hostname')) 794 test_report = collections.namedtuple('test_report', 795 'predicates, fallout') 796 return test_report(predicates(result_status, 'bad_test', 797 'dreadful_reason'), 798 fallout('2014-01-01 01:01:01', 'None', 799 self._FAKE_JOB_ID, 'user', 'myhost')) 800 801 802 def testJobRetryTestFail(self): 803 """Test retry works.""" 804 self.additional_mocking() 805 test_to_retry = self.files['seven'] 806 fake_new_job_id = self._FAKE_JOB_ID + 1 807 fake_job = FakeJob(id=self._FAKE_JOB_ID) 808 fake_new_job = FakeJob(id=fake_new_job_id) 809 810 test_results = self._createSuiteMockResults() 811 self.schedule_and_expect_these_results( 812 self.suite, 813 [test_results[0] + test_results[1]], 814 self.recorder) 815 with patch.object(self.suite._job_creator, 'create_job') as suite_mock: 816 817 suite_mock.return_value = fake_new_job 818 self.suite.schedule(self.recorder.record_entry) 819 self.suite._retry_handler._retry_map = { 820 self._FAKE_JOB_ID: { 821 'state': RetryHandler.States.NOT_ATTEMPTED, 822 'retry_max': 1 823 } 824 } 825 self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry 826 self.suite.wait(self.recorder.record_entry) 827 expected_retry_map = { 828 self._FAKE_JOB_ID: { 829 'state': RetryHandler.States.RETRIED, 830 'retry_max': 1 831 }, 832 fake_new_job_id: { 833 'state': RetryHandler.States.NOT_ATTEMPTED, 834 'retry_max': 0 835 } 836 } 837 # Check retry map is correctly updated 838 self.assertEquals(self.suite._retry_handler._retry_map, 839 expected_retry_map) 840 # Check _jobs_to_tests is correctly updated 841 self.assertEquals(self.suite._jobs_to_tests[fake_new_job_id], 842 test_to_retry) 843 844 def testJobRetryTestWarn(self): 845 """Test that no retry is scheduled if test warns.""" 846 self.additional_mocking() 847 test_to_retry = self.files['seven'] 848 fake_job = FakeJob(id=self._FAKE_JOB_ID) 849 test_results = self._createSuiteMockResults(result_status='WARN') 850 self.schedule_and_expect_these_results( 851 self.suite, 852 [test_results[0] + test_results[1]], 853 self.recorder) 854 self.suite.schedule(self.recorder.record_entry) 855 self.suite._retry_handler._retry_map = { 856 self._FAKE_JOB_ID: {'state': RetryHandler.States.NOT_ATTEMPTED, 857 'retry_max': 1} 858 } 859 self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry 860 expected_jobs_to_tests = self.suite._jobs_to_tests.copy() 861 expected_retry_map = self.suite._retry_handler._retry_map.copy() 862 self.suite.wait(self.recorder.record_entry) 863 self.assertTrue(self.result_reporter.results) 864 # Check retry map and _jobs_to_tests, ensure no retry was scheduled. 865 self.assertEquals(self.suite._retry_handler._retry_map, 866 expected_retry_map) 867 self.assertEquals(self.suite._jobs_to_tests, expected_jobs_to_tests) 868 869 def testFailedJobRetry(self): 870 """Make sure the suite survives even if the retry failed.""" 871 self.additional_mocking() 872 test_to_retry = self.files['seven'] 873 FakeJob(id=self._FAKE_JOB_ID) 874 875 test_results = self._createSuiteMockResults() 876 self.schedule_and_expect_these_results( 877 self.suite, 878 [test_results[0] + test_results[1]], 879 self.recorder) 880 with patch.object(self.suite._job_creator, 881 'create_job') as suite_mock, patch.object( 882 self.suite, '_should_report') as report_mock: 883 suite_mock.side_effect = error.RPCException('Expected during test') 884 885 # Do not file a bug. 886 report_mock.return_value = False 887 888 self.suite.schedule(self.recorder.record_entry) 889 self.suite._retry_handler._retry_map = { 890 self._FAKE_JOB_ID: { 891 'state': RetryHandler.States.NOT_ATTEMPTED, 892 'retry_max': 1 893 } 894 } 895 self.suite._jobs_to_tests[self._FAKE_JOB_ID] = test_to_retry 896 self.suite.wait(self.recorder.record_entry) 897 expected_retry_map = { 898 self._FAKE_JOB_ID: { 899 'state': RetryHandler.States.ATTEMPTED, 900 'retry_max': 1 901 } 902 } 903 expected_jobs_to_tests = self.suite._jobs_to_tests.copy() 904 self.assertEquals(self.suite._retry_handler._retry_map, 905 expected_retry_map) 906 self.assertEquals(self.suite._jobs_to_tests, 907 expected_jobs_to_tests) 908 909 suite_mock.assert_called_with(test_to_retry, 910 retry_for=self._FAKE_JOB_ID) 911 912 913class _MemoryResultReporter(SuiteBase._ResultReporter): 914 """Reporter that stores results internally for testing.""" 915 def __init__(self): 916 self.results = [] 917 918 def report(self, result): 919 """Reports the result by storing it internally.""" 920 self.results.append(result) 921 922 923if __name__ == '__main__': 924 unittest.main() 925