# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob
import logging
import os
import random
import re
import shutil
import time

import common
from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import constants, cros_logging


_CRASH_RUN_STATE_DIR = '/run/crash_reporter'


class FilterOut:
    """contextmanager-compatible class to block certain crashes during tests."""
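    # Illustrative usage (a sketch, not taken from this file): while the block
    # below is active, crash_reporter skips crashes that match the given name.
    #
    #   with FilterOut('some_noisy_process'):
    #       ...exercise code whose crashes we want crash_reporter to ignore...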

    def __init__(self, name):
        self._FILTER_OUT = _CRASH_RUN_STATE_DIR + '/filter-out'
        self.name = name

    def __enter__(self):
        """Writes the given parameter to the filter-out file.

        This is used to ignore crashes in which we have no interest.
        """
        utils.open_write_close(self._FILTER_OUT, self.name)

    def __exit__(self, ex_type, value, traceback):
        """Remove the filter-out file.

        Next time the crash reporter is invoked, it will not filter crashes."""
        os.remove(self._FILTER_OUT)
        # Do *not* handle any exception
        return False


class CrashTest(test.test):
    """
    This class deals with running crash tests, which are tests which crash a
    user-space program (or the whole machine) and generate a core dump. We
    want to check that the correct crash dump is available and can be
    retrieved.

    Chromium OS has a crash sender which checks for new crash data and sends
    it to a server. This crash data is used to track software quality and find
    bugs. The system crash sender normally runs automatically, but can be
    paused by creating _PAUSE_FILE. When crash sender sees this, it pauses
    operation.

    For testing purposes we sometimes want to run the crash sender manually.
    In this case we pass the --ignore_pause_file flag so that it runs even
    while paused.

    Also for testing we sometimes want to mock out the crash sender, and just
    have it pretend to succeed or fail. The _MOCK_CRASH_SENDING file is used
    for this. If it doesn't exist, then the crash sender runs normally. If
    it exists but is empty, the crash sender will succeed (but actually do
    nothing). If the file contains something, then the crash sender will fail.

    If the user consents to sending crash reports, then the _CONSENT_FILE will
    exist in the home directory. This test needs to create this file for the
    crash sending to work. The metrics daemon caches the consent state for
    1 second, so we need to sleep for more than that after changing it to be
    sure it picks up the change.

    Crash reports are rate limited to a certain number of reports each 24
    hours. If the maximum number has already been sent then reports are held
    until later. This is administered by a directory _CRASH_SENDER_RATE_DIR
    which contains one temporary file for each time a report is sent.

    The class provides the ability to push the consent file. This disables
    consent for this test but allows it to be popped back later. This
    makes nested tests easier. If _automatic_consent_saving is True (the
    default) then consent will be pushed at the start and popped at the end.

    Interesting variables:
        _log_reader: the log reader used for reading log files
        _leave_crash_sending: True to enable crash sending on exit from the
            test, False to disable it. (Default True).
        _automatic_consent_saving: True to push the consent at the start of
            the test and pop it afterwards. (Default True).

    Useful places to look for more information are:

    chromeos/src/platform/crash-reporter/crash_sender
        - script which sends the crash reports that crash_reporter has created

    chromeos/src/platform/crash-reporter/
        - crash reporter program
    """
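    # Illustrative subclass usage (a sketch; the test name and failure checks
    # below are hypothetical, not taken from this file):
    #
    #   class logging_ExampleCrash(CrashTest):
    #       version = 1
    #
    #       def _test_sender_simple(self):
    #           result = self._call_sender_one_crash()
    #           if not result['send_success']:
    #               raise error.TestFail('mocked send did not succeed')
    #
    #       def run_once(self):
    #           self.run_crash_tests(['sender_simple'])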


    _CONSENT_FILE = '/home/chronos/Consent To Send Stats'
    _CORE_PATTERN = '/proc/sys/kernel/core_pattern'
    _LOCK_CORE_PATTERN = '/proc/sys/kernel/lock_core_pattern'
    _CRASH_REPORTER_PATH = '/sbin/crash_reporter'
    _CRASH_SENDER_PATH = '/sbin/crash_sender'
    _CRASH_SENDER_RATE_DIR = '/var/lib/crash_sender'
    _CRASH_SENDER_LOCK_PATH = '/run/lock/crash_sender'
    _CRASH_TEST_IN_PROGRESS = _CRASH_RUN_STATE_DIR + '/crash-test-in-progress'
    _MOCK_CRASH_SENDING = _CRASH_RUN_STATE_DIR + '/mock-crash-sending'
    _FILTER_IN = _CRASH_RUN_STATE_DIR + '/filter-in'
    _PAUSE_FILE = '/var/lib/crash_sender_paused'
    _SYSTEM_CRASH_DIR = '/var/spool/crash'
    _FALLBACK_USER_CRASH_DIR = '/home/chronos/crash'
    _REBOOT_VAULT_CRASH_DIR = '/mnt/stateful_partition/reboot_vault/crash'
    _USER_CRASH_DIRS = '/home/chronos/u-*/crash'
    _USER_CRASH_DIR_REGEX = re.compile('/home/chronos/u-([a-f0-9]+)/crash')

    # Matches kDefaultMaxUploadBytes
    _MAX_CRASH_SIZE = 1024 * 1024

    # Use the same file format as crash does normally:
    # <basename>.#.#.#.#.meta
    _FAKE_TEST_BASENAME = 'fake.1.2.3.4'

    def _set_system_sending(self, is_enabled):
        """Sets whether or not the system crash_sender is allowed to run.

        This is done by creating or removing _PAUSE_FILE.

        crash_sender may still run while paused when it is invoked manually
        (e.g. by _call_sender_one_crash) with the --ignore_pause_file flag.

        @param is_enabled: True to enable crash_sender, False to disable it.
        """
        if is_enabled:
            if os.path.exists(self._PAUSE_FILE):
                os.remove(self._PAUSE_FILE)
        else:
            utils.system('touch ' + self._PAUSE_FILE)

    def _remove_all_files_in_dir(self, d):
        """Removes all files and subdirectories in |d|, without removing |d|
        itself.
        """
        try:
            root, dirs, files = next(os.walk(d))
        except StopIteration:
            return
        for path in files:
            os.remove(os.path.join(root, path))
        for path in dirs:
            shutil.rmtree(os.path.join(root, path))


    def _reset_rate_limiting(self):
        """Reset the count of crash reports sent today.

        This clears the contents of the rate limiting directory, which has
        the effect of resetting our count of crash reports sent.
        """
        self._remove_all_files_in_dir(self._CRASH_SENDER_RATE_DIR)


    def _clear_spooled_crashes(self):
        """Clears system and user crash directories.

        This will remove all crash reports which are waiting to be sent.
        """
        self._remove_all_files_in_dir(self._SYSTEM_CRASH_DIR)
        self._remove_all_files_in_dir(self._REBOOT_VAULT_CRASH_DIR)
        for d in glob.glob(self._USER_CRASH_DIRS):
            self._remove_all_files_in_dir(d)
        self._remove_all_files_in_dir(self._FALLBACK_USER_CRASH_DIR)


    def _kill_running_sender(self):
        """Kill the crash_sender process if it is running."""
        utils.system('pkill -9 -e --exact crash_sender', ignore_status=True)


    def _set_sending_mock(self, mock_enabled):
        """Enables / disables mocking of the sending process.

        This uses the _MOCK_CRASH_SENDING file to achieve its aims. See notes
        at the top.

        @param mock_enabled: If True, mocking is enabled, else it is disabled.
        """
        if mock_enabled:
            data = ''
            logging.info('Setting sending mock')
            utils.open_write_close(self._MOCK_CRASH_SENDING, data)
        else:
            utils.system('rm -f ' + self._MOCK_CRASH_SENDING)


    def _set_consent(self, has_consent):
        """Sets whether or not we have consent to send crash reports.

        This creates or deletes the _CONSENT_FILE to control whether
        crash_sender will consider that it has consent to send crash reports.
        It also copies a policy blob with the proper policy setting.

        @param has_consent: True to indicate consent, False otherwise
        """
        autotest_cros_dir = os.path.join(os.path.dirname(__file__), '..')
        if has_consent:
            if os.path.isdir(constants.DEVICESETTINGS_DIR):
                # Create policy file that enables metrics/consent.
                shutil.copy('%s/mock_metrics_on.policy' % autotest_cros_dir,
                            constants.SIGNED_POLICY_FILE)
                shutil.copy('%s/mock_metrics_owner.key' % autotest_cros_dir,
                            constants.OWNER_KEY_FILE)
            # Create deprecated consent file.  This is created *after* the
            # policy file in order to avoid a race condition where chrome
            # might remove the consent file if the policy's not set yet.
            # We create it as a temp file first in order to make the creation
            # of the consent file, owned by chronos, atomic.
            # See crosbug.com/18413.
            temp_file = self._CONSENT_FILE + '.tmp'
            utils.open_write_close(temp_file, 'test-consent')
            utils.system('chown chronos:chronos "%s"' % (temp_file))
            shutil.move(temp_file, self._CONSENT_FILE)
            logging.info('Created %s', self._CONSENT_FILE)
        else:
            if os.path.isdir(constants.DEVICESETTINGS_DIR):
                # Create policy file that disables metrics/consent.
                shutil.copy('%s/mock_metrics_off.policy' % autotest_cros_dir,
                            constants.SIGNED_POLICY_FILE)
                shutil.copy('%s/mock_metrics_owner.key' % autotest_cros_dir,
                            constants.OWNER_KEY_FILE)
            # Remove deprecated consent file.
            utils.system('rm -f "%s"' % (self._CONSENT_FILE))
        # Ensure cached consent state is updated.
        time.sleep(2)


    def _set_crash_test_in_progress(self, in_progress):
        if in_progress:
            utils.open_write_close(self._CRASH_TEST_IN_PROGRESS, 'in-progress')
            logging.info('Created %s', self._CRASH_TEST_IN_PROGRESS)
        else:
            utils.system('rm -f "%s"' % (self._CRASH_TEST_IN_PROGRESS))


    def _get_pushed_consent_file_path(self):
        """Returns filename of the pushed consent file."""
        return os.path.join(self.bindir, 'pushed_consent')


    def _get_pushed_policy_file_path(self):
        """Returns filename of the pushed policy file."""
        return os.path.join(self.bindir, 'pushed_policy')


    def _get_pushed_owner_key_file_path(self):
        """Returns filename of the pushed owner.key file."""
        return os.path.join(self.bindir, 'pushed_owner_key')


    def _push_consent(self):
        """Push the consent file, thus disabling consent.

        The consent files can be created in the new test if required. Call
        _pop_consent() to restore the original state.
        """
        if os.path.exists(self._CONSENT_FILE):
            shutil.move(self._CONSENT_FILE,
                        self._get_pushed_consent_file_path())
        if os.path.exists(constants.SIGNED_POLICY_FILE):
            shutil.move(constants.SIGNED_POLICY_FILE,
                        self._get_pushed_policy_file_path())
        if os.path.exists(constants.OWNER_KEY_FILE):
            shutil.move(constants.OWNER_KEY_FILE,
                        self._get_pushed_owner_key_file_path())
        # Ensure cached consent state is updated.
        time.sleep(2)


    def _pop_consent(self):
        """Pop the consent files, restoring consent to whatever state it was
        in before we pushed it."""
        if os.path.exists(self._get_pushed_consent_file_path()):
            shutil.move(self._get_pushed_consent_file_path(),
                        self._CONSENT_FILE)
        else:
            utils.system('rm -f "%s"' % self._CONSENT_FILE)
        if os.path.exists(self._get_pushed_policy_file_path()):
            shutil.move(self._get_pushed_policy_file_path(),
                        constants.SIGNED_POLICY_FILE)
        else:
            utils.system('rm -f "%s"' % constants.SIGNED_POLICY_FILE)
        if os.path.exists(self._get_pushed_owner_key_file_path()):
            shutil.move(self._get_pushed_owner_key_file_path(),
                        constants.OWNER_KEY_FILE)
        else:
            utils.system('rm -f "%s"' % constants.OWNER_KEY_FILE)
        # Ensure cached consent state is updated.
        time.sleep(2)


    def _get_crash_dir(self, username, force_user_crash_dir=False):
        """Returns crash directory for process running as the given user.

        @param username: Unix user of the crashing process.
        @param force_user_crash_dir: Regardless of |username|, return the crash
                                     directory of the current user session, or
                                     the fallback directory if no sessions.
        """
        if username in ('root', 'crash') and not force_user_crash_dir:
            return self._SYSTEM_CRASH_DIR
        else:
            dirs = glob.glob(self._USER_CRASH_DIRS)
            return dirs[0] if dirs else self._FALLBACK_USER_CRASH_DIR


    def _canonicalize_crash_dir(self, crash_dir):
        """Converts /home/chronos crash directory to /home/user counterpart.

        @param crash_dir: A path of the form /home/chronos/u-<hash>/crash.
        @returns /home/user/<hash>/crash, or |crash_dir| on form mismatch.
        """
        match = re.match(self._USER_CRASH_DIR_REGEX, crash_dir)
        return ('/home/user/%s/crash' % match.group(1)) if match else crash_dir


    def _initialize_crash_reporter(self, lock_core_pattern):
        """Start up the crash reporter.

        @param lock_core_pattern: lock core pattern during initialization.
        """

        if not lock_core_pattern:
            self._set_crash_test_in_progress(False)
        utils.system('%s --init' % self._CRASH_REPORTER_PATH)
        if not lock_core_pattern:
            self._set_crash_test_in_progress(True)
            # Completely disable crash_reporter from generating crash dumps
            # while any tests are running, otherwise a crashy system can make
            # these tests flaky.
            self.enable_crash_filtering('none')


    def get_crash_dir_name(self, name):
        """Return the full path for |name| inside the system crash directory."""
        return os.path.join(self._SYSTEM_CRASH_DIR, name)


    def write_crash_dir_entry(self, name, contents):
        """Writes a file to the system crash directory.

        This writes a file to _SYSTEM_CRASH_DIR with the given name. This is
        used to insert new crash dump files for testing purposes.

        If contents is not a string, binary data is assumed.

        @param name: Name of file to write.
        @param contents: String/binary data to write to the file.
        """
        entry = self.get_crash_dir_name(name)
        if not os.path.exists(self._SYSTEM_CRASH_DIR):
            os.makedirs(self._SYSTEM_CRASH_DIR)

        is_binary = not isinstance(contents, str)
        utils.open_write_close(entry, contents, is_binary)

        return entry


    def write_fake_meta(self, name, exec_name, payload, complete=True):
        """Writes a fake meta entry to the system crash directory.

        @param name: Name of file to write.
        @param exec_name: Value for exec_name item.
        @param payload: Value for payload item.
        @param complete: True to close off the record, otherwise leave it
                incomplete.
        """
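        # For example (illustrative), write_fake_meta('fake.1.2.3.4.meta',
        # 'fake', 'fake.1.2.3.4.dmp') produces a file containing:
        #
        #   exec_name=fake
        #   ver=my_ver
        #   payload=fake.1.2.3.4.dmp
        #   done=1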
        last_line = ''
        if complete:
            last_line = 'done=1\n'
        contents = ('exec_name=%s\n'
                    'ver=my_ver\n'
                    'payload=%s\n'
                    '%s' % (exec_name, payload,
                            last_line))
        return self.write_crash_dir_entry(name, contents)

    def _get_dmp_contents(self):
        """Creates the contents of the dmp file for the crashes we fabricate.

        The dmp file contents are deliberately large and hard-to-compress. This
        ensures logging_CrashSender hits its bytes/day cap before its sends/day
        cap.
        """
        return bytearray(
                [random.randint(0, 255) for n in range(self._MAX_CRASH_SIZE)])


    def _prepare_sender_one_crash(self,
                                  reports_enabled,
                                  report):
        """Create metadata for a fake crash report.

        This enables mocking of the crash sender, then creates a fake
        crash report for testing purposes.

        @param reports_enabled: True to enable consent so that reports will be
                sent.
        @param report: Report to use for crash, if None we create one.
        """
        self._set_sending_mock(mock_enabled=True)
        self._set_consent(reports_enabled)
        if report is None:
            # Use the same file format as crash does normally:
            # <basename>.#.#.#.#.meta
            payload = os.path.basename(
                    self.write_crash_dir_entry(
                            '%s.dmp' % self._FAKE_TEST_BASENAME,
                            self._get_dmp_contents()))
            report = self.write_fake_meta(
                '%s.meta' % self._FAKE_TEST_BASENAME, 'fake', payload)
        return report


    def _parse_sender_output(self, output):
        """Parse the log output from the crash_sender script.

        This script can run on the logs from either a mocked or true
        crash send. It looks for one and only one crash in the output.
        Non-crash anomalies are ignored since they are just noise while
        the test is running.

        @param output: output from the script

        @returns A dictionary with these values:
            exec_name: name of executable which crashed
            image_type: type of image ("dev","test",...), if given
            boot_mode: current boot mode ("dev",...), if given
            meta_path: path to the report metadata file
            report_payload: path to the report payload file, if given
            output: the output from the script, copied
            report_kind: kind of report sent (minidump vs kernel)
            send_attempt: did the script attempt to send a crash.
            send_success: if it attempted, was the crash send successful.
            sig: signature of the report, if given.
            sleep_time: if it attempted, how long did it sleep before
              sending (if mocked, how long would it have slept)
        """
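        # The searches below expect mocked crash_sender syslog lines roughly
        # like the following (illustrative paths; the syslog prefix before
        # "crash_sender[...]" varies):
        #
        #   ... crash_sender[1234]: Considering metadata /var/spool/crash/fake.1.2.3.4.meta
        #   ... crash_sender[1234]: Scheduled to send in 15s
        #   ... crash_sender[1234]: Metadata: /var/spool/crash/fake.1.2.3.4.meta (minidump)
        #   ... crash_sender[1234]: Payload: /var/spool/crash/fake.1.2.3.4.dmp
        #   ... crash_sender[1234]: Exec name: fake
        #   ... crash_sender[1234]: Mocking successful send
        #
        # which would yield send_attempt=True, sleep_time=15,
        # report_kind='minidump', exec_name='fake' and send_success=True.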
        anomaly_types = (
            'kernel_suspend_warning',
            'kernel_warning',
            'kernel_wifi_warning',
            'selinux_violation',
            'service_failure',
        )

        def crash_sender_search(regexp, output):
            """Narrow search to lines from crash_sender."""
            return re.search(r'crash_sender\[\d+\]:\s+' + regexp, output)

        before_first_crash = None
        while True:
            crash_header = crash_sender_search(
                r'Considering metadata (\S+)',
                output
            )
            if not crash_header:
                break
            if before_first_crash is None:
                before_first_crash = output[:crash_header.start()]
            meta_considered = crash_header.group(1)
            is_anomaly = any(x in meta_considered for x in anomaly_types)
            if is_anomaly:
                # If it's an anomaly, skip this header, and look for next
                # one.
                output = output[crash_header.end():]
            else:
                # If it's not an anomaly, skip everything before this
                # header.
                output = output[crash_header.start():]
                break
        if before_first_crash:
            output = before_first_crash + output
        logging.debug('Filtered sender output to parse:\n%s', output)

        sleep_match = crash_sender_search(r'Scheduled to send in (\d+)s', output)
        send_attempt = sleep_match is not None
        if send_attempt:
            sleep_time = int(sleep_match.group(1))
        else:
            sleep_time = None

        meta_match = crash_sender_search(r'Metadata: (\S+) \((\S+)\)', output)
        if meta_match:
            meta_path = meta_match.group(1)
            report_kind = meta_match.group(2)
        else:
            meta_path = None
            report_kind = None

        payload_match = crash_sender_search(r'Payload: (\S+)', output)
        if payload_match:
            report_payload = payload_match.group(1)
        else:
            report_payload = None

        exec_name_match = crash_sender_search(r'Exec name: (\S+)', output)
        if exec_name_match:
            exec_name = exec_name_match.group(1)
        else:
            exec_name = None

        sig_match = crash_sender_search(r'sig: (\S+)', output)
        if sig_match:
            sig = sig_match.group(1)
        else:
            sig = None

        image_type_match = crash_sender_search(r'Image type: (\S+)', output)
        if image_type_match:
            image_type = image_type_match.group(1)
        else:
            image_type = None

        boot_mode_match = crash_sender_search(r'Boot mode: (\S+)', output)
        if boot_mode_match:
            boot_mode = boot_mode_match.group(1)
        else:
            boot_mode = None

        send_success = 'Mocking successful send' in output
        return {'exec_name': exec_name,
                'report_kind': report_kind,
                'meta_path': meta_path,
                'report_payload': report_payload,
                'send_attempt': send_attempt,
                'send_success': send_success,
                'sig': sig,
                'image_type': image_type,
                'boot_mode': boot_mode,
                'sleep_time': sleep_time,
                'output': output}


    def wait_for_sender_completion(self):
        """Wait for crash_sender to complete.

        Wait for crash_sender's last message to be placed in the system
        log and for the process to finish before continuing. Otherwise we
        might get only part of the output."""
        utils.poll_for_condition(
            lambda: self._log_reader.can_find('crash_sender done.'),
            timeout=60,
            exception=error.TestError(
              'Timeout waiting for crash_sender to emit done: ' +
              self._log_reader.get_logs()))
        utils.poll_for_condition(
            lambda: utils.system('pgrep crash_sender',
                                 ignore_status=True) != 0,
            timeout=60,
            exception=error.TestError(
                'Timeout waiting for crash_sender to finish: ' +
                self._log_reader.get_logs()))


    def _call_sender_one_crash(self, reports_enabled=True, report=None):
        """Call the crash sender script to mock upload one crash.

        @param reports_enabled: Has the user consented to sending crash reports.
        @param report: report to use for crash, if None we create one.

        @returns a dictionary describing the result with the keys
          from _parse_sender_output, as well as:
            report_exists: does the report file still exist after calling
              the send script
            rate_count: how many crashes have been uploaded in the past
              24 hours.
        """
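        # Typical call pattern in a test (illustrative, not from this file):
        #
        #   result = self._call_sender_one_crash(reports_enabled=False)
        #   if result['send_attempt']:
        #       raise error.TestFail('report was sent without consent')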
        report = self._prepare_sender_one_crash(reports_enabled,
                                                report)
        self._log_reader.set_start_by_current()
        script_output = ""
        try:
            script_output = utils.system_output(
                '%s --ignore_pause_file 2>&1' % (self._CRASH_SENDER_PATH),
                ignore_status=False)
        except error.CmdError as err:
            raise error.TestFail('"%s" returned an unexpected non-zero '
                                 'value (%s).'
                                 % (err.command, err.result_obj.exit_status))

        self.wait_for_sender_completion()
        output = self._log_reader.get_logs()
        logging.debug('Crash sender message output:\n %s', output)

        if script_output != '':
            logging.debug('crash_sender stdout/stderr: %s', script_output)

        if os.path.exists(report):
            report_exists = True
            os.remove(report)
        else:
            report_exists = False
        if os.path.exists(self._CRASH_SENDER_RATE_DIR):
            rate_count = len([
                name for name in os.listdir(self._CRASH_SENDER_RATE_DIR)
                if os.path.isfile(os.path.join(self._CRASH_SENDER_RATE_DIR,
                                               name))
            ])
        else:
            rate_count = 0

        result = self._parse_sender_output(output)
        result['report_exists'] = report_exists
        result['rate_count'] = rate_count

        # Show the result for debugging, but remove the 'output' key since
        # it is large and already appears earlier in the debug output.
        debug_result = dict(result)
        del debug_result['output']
        logging.debug('Result of send (besides output): %s', debug_result)

        return result


    def enable_crash_filtering(self, name):
        """Writes the given parameter to the filter-in file.

        This is used to collect only crashes in which we have an interest.

        @param name: The filter to write to the file, if any.
        """
        utils.open_write_close(self._FILTER_IN, name)


    def disable_crash_filtering(self):
        """Remove the filter-in file.

        Next time the crash reporter is invoked, it will not filter crashes."""
        os.remove(self._FILTER_IN)


    def initialize(self):
        """Initialize the test."""
        test.test.initialize(self)
        self._log_reader = cros_logging.make_system_log_reader()
        self._leave_crash_sending = True
        self._automatic_consent_saving = True
        self.enable_crash_filtering('none')
        self._set_crash_test_in_progress(True)


    def cleanup(self):
        """Cleanup after the test.

        We reset things back to the way we think they should be. This is
        intended to allow the system to continue normal operation.

        Some variables silently change the behavior:
            _automatic_consent_saving: if True, we pop the consent file.
            _leave_crash_sending: True to enable crash sending, False to
                disable it
        """
        self._reset_rate_limiting()
        self._clear_spooled_crashes()
        self._set_system_sending(self._leave_crash_sending)
        self._set_sending_mock(mock_enabled=False)
        if self._automatic_consent_saving:
            self._pop_consent()
        self._set_crash_test_in_progress(False)

        # Re-initialize crash reporter to clear any state left over
        # (e.g. core_pattern)
        self._initialize_crash_reporter(True)

        self.disable_crash_filtering()

        test.test.cleanup(self)


    def run_crash_tests(self,
                        test_names,
                        initialize_crash_reporter=False,
                        clear_spool_first=True,
                        must_run_all=True,
                        lock_core_pattern=False):
        """Run crash tests defined in this class.

        @param test_names: Array of test names.
        @param initialize_crash_reporter: Whether to set up the crash reporter
                before every run.
        @param clear_spool_first: Clear all spooled user/system crashes before
                starting the test.
        @param must_run_all: Whether to check that every test in this class is
                mentioned in test_names.
        @param lock_core_pattern: Lock core_pattern while initializing
                crash_reporter.
        """
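        # Illustrative invocation from a subclass's run_once() (the test names
        # below are hypothetical; each must have a matching _test_<name>
        # method defined on the class):
        #
        #   self.run_crash_tests(['reporter_startup', 'sender_simple'],
        #                        initialize_crash_reporter=True)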
        if self._automatic_consent_saving:
            self._push_consent()

        if must_run_all:
            # Check test_names is complete
            for attr in dir(self):
                if attr.startswith('_test_'):
                    test_name = attr[6:]
                    if test_name not in test_names:
                        raise error.TestError('Test %s is missing' % test_name)

        for test_name in test_names:
            logging.info(('=' * 20) + ('Running %s' % test_name) + ('=' * 20))
            if initialize_crash_reporter:
                self._initialize_crash_reporter(lock_core_pattern)
            # Disable crash_sender from running, kill off any running ones.
            # We pass a flag to crash_sender when invoking it manually so that
            # our invocations are not paused.
            self._set_system_sending(False)
            self._kill_running_sender()
            self._reset_rate_limiting()
            if clear_spool_first:
                self._clear_spooled_crashes()

            # Call the test function
            getattr(self, '_test_' + test_name)()

        # Clear the intentional crashes, so that the server won't automatically
        # report the crashes as failures.
        self._clear_spooled_crashes()