xref: /aosp_15_r20/external/autotest/autotest_lib/client/bin/job.py (revision 9c5db1993ded3edbeafc8092d69fe5de2ee02df7)
# Lint as: python2, python3
"""The main job wrapper

This is the core infrastructure.

Copyright Andy Whitcroft, Martin J. Bligh 2006
"""

# pylint: disable=missing-docstring

import copy
from datetime import datetime
import getpass
import glob
import logging
import os
import re
import shutil
import sys
import time
import traceback
import types
import weakref

import six

import common
from autotest_lib.client.bin import client_logging_config
from autotest_lib.client.bin import harness
from autotest_lib.client.bin import local_host
from autotest_lib.client.bin import parallel
from autotest_lib.client.bin import partition as partition_lib
from autotest_lib.client.bin import profilers
from autotest_lib.client.bin import sysinfo
from autotest_lib.client.bin import test
from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import barrier
from autotest_lib.client.common_lib import base_job
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.common_lib import packages
from autotest_lib.client.cros import cros_logging
from autotest_lib.client.tools import html_report

GLOBAL_CONFIG = global_config.global_config

LAST_BOOT_TAG = object()
JOB_PREAMBLE = """
from autotest_lib.client.common_lib.error import *
from autotest_lib.client.bin.utils import *
"""


class StepError(error.AutotestError):
    pass

class NotAvailableError(error.AutotestError):
    pass



def _run_test_complete_on_exit(f):
    """Decorator for job methods that automatically calls
    self.harness.run_test_complete when the method exits, if appropriate."""
    def wrapped(self, *args, **dargs):
        try:
            return f(self, *args, **dargs)
        finally:
            if self._logger.global_filename == 'status':
                self.harness.run_test_complete()
                if self.drop_caches:
                    utils.drop_caches()
    wrapped.__name__ = f.__name__
    wrapped.__doc__ = f.__doc__
    wrapped.__dict__.update(f.__dict__)
    return wrapped


class status_indenter(base_job.status_indenter):
    """Provide a status indenter that is backed by job._record_prefix."""
    def __init__(self, job_):
        self._job = weakref.proxy(job_)  # avoid a circular reference


    @property
    def indent(self):
        return self._job._record_indent


    def increment(self):
        self._job._record_indent += 1


    def decrement(self):
        self._job._record_indent -= 1


class base_client_job(base_job.base_job):
    """The client-side concrete implementation of base_job.

    Optional properties provided by this implementation:
        control
        harness
    """

    _WARNING_DISABLE_DELAY = 5

    # _record_indent is a persistent property, but only on the client
    _job_state = base_job.base_job._job_state
    _record_indent = _job_state.property_factory(
        '_state', '_record_indent', 0, namespace='client')
    _max_disk_usage_rate = _job_state.property_factory(
        '_state', '_max_disk_usage_rate', 0.0, namespace='client')


    def __init__(self, control, options, drop_caches=True):
        """
        Prepare a client side job object.

        @param control: The control file (pathname of).
        @param options: an object which includes:
                jobtag: The job tag string (eg "default").
                cont: If this is the continuation of this job.
                harness_type: An alternative server harness.  [None]
                use_external_logging: If true, the enable_external_logging
                          method will be called during construction.  [False]
        @param drop_caches: If true, utils.drop_caches() is called before and
                between all tests.  [True]
        """
        super(base_client_job, self).__init__(options=options)
        self._pre_record_init(control, options)
        try:
            self._post_record_init(control, options, drop_caches)
        except Exception as err:
            self.record(
                    'ABORT', None, None,'client.bin.job.__init__ failed: %s' %
                    str(err))
            raise


    @classmethod
    def _get_environ_autodir(cls):
        return os.environ['AUTODIR']


    @classmethod
    def _find_base_directories(cls):
        """
        Determine locations of autodir and clientdir (which are the same)
        using os.environ. Serverdir does not exist in this context.
        """
        autodir = clientdir = cls._get_environ_autodir()
        return autodir, clientdir, None


    @classmethod
    def _parse_args(cls, args):
        return re.findall(r"[^\s]*?['|\"].*?['|\"]|[^\s]+", args)
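    # A rough illustration of how the tokenizer above behaves (hypothetical
    # input; quoted chunks are kept as single tokens, quotes included):
    #     _parse_args('foo "bar baz" --flag=1')
    #     => ['foo', '"bar baz"', '--flag=1']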


    def _find_resultdir(self, options):
        """
        Determine the directory for storing results. On a client this is
        always <autodir>/results/<tag>, where tag is passed in on the command
        line as an option.
        """
        output_dir_config = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'output_dir',
                                                           default="")
        if options.output_dir:
            basedir = options.output_dir
        elif output_dir_config:
            basedir = output_dir_config
        else:
            basedir = self.autodir

        return os.path.join(basedir, 'results', options.tag)


    def _get_status_logger(self):
        """Return a reference to the status logger."""
        return self._logger


    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        if self.control:
            parsed_control = control_data.parse_control(
                    self.control, raise_warnings=False)
            self.fast = parsed_control.fast

        # set up the status logger
        def client_job_record_hook(entry):
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook)


    def _post_record_init(self, control, options, drop_caches):
        """
        Perform job initialization not required by self.record().
        """
        self._init_drop_caches(drop_caches)

        self._init_packages()

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self._load_sysinfo_state()

        if not options.cont:
            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))

        self.control = control

        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        self.logging.start_logging()

        self.profilers = profilers.profilers(self)

        self.machines = [options.hostname]
        self.machine_dict_list = [{'hostname' : options.hostname}]
        # Client side tests should always run the same whether or not they are
        # running in the lab.
        self.in_lab = False
        self.hosts = set([local_host.LocalHost(hostname=options.hostname)])

        self.args = []
        if options.args:
            self.args = self._parse_args(options.args)

        if options.user:
            self.user = options.user
        else:
            self.user = getpass.getuser()

        self.sysinfo.log_per_reboot_data()

        if not options.cont:
            self.record('START', None, None)

        self.harness.run_start()

        if options.log:
            self.enable_external_logging()

        self.num_tests_run = None
        self.num_tests_failed = None

        self.warning_loggers = None
        self.warning_manager = None


    def _init_drop_caches(self, drop_caches):
        """
        Perform the drop caches initialization.
        """
        self.drop_caches_between_iterations = (
                                    GLOBAL_CONFIG.get_config_value('CLIENT',
                                    'drop_caches_between_iterations',
                                    type=bool, default=True))
        self.drop_caches = drop_caches
        if self.drop_caches:
            utils.drop_caches()


    def _init_packages(self):
        """
        Perform the packages support initialization.
        """
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':3600})


    def _cleanup_results_dir(self):
        """Delete everything in resultsdir"""
        assert os.path.exists(self.resultdir)
        list_files = glob.glob('%s/*' % self.resultdir)
        for f in list_files:
            if os.path.isdir(f):
                shutil.rmtree(f)
            elif os.path.isfile(f):
                os.remove(f)


    def _cleanup_debugdir_files(self):
        """
        Delete any leftover debugdir files
        """
        list_files = glob.glob("/tmp/autotest_results_dir.*")
        for f in list_files:
            os.remove(f)


    def disable_warnings(self, warning_type):
        self.record("INFO", None, None,
                    "disabling %s warnings" % warning_type,
                    {"warnings.disable": warning_type})
        time.sleep(self._WARNING_DISABLE_DELAY)


    def enable_warnings(self, warning_type):
        time.sleep(self._WARNING_DISABLE_DELAY)
        self.record("INFO", None, None,
                    "enabling %s warnings" % warning_type,
                    {"warnings.enable": warning_type})


    def monitor_disk_usage(self, max_rate):
        """\
        Signal that the job should monitor disk space usage on /
        and generate a warning if a test uses up disk space at a
        rate exceeding 'max_rate'.

        Parameters:
             max_rate - the maximum allowed rate of disk consumption
                        during a test, in MB/hour, or 0 to indicate
                        no limit.
        """
        self._max_disk_usage_rate = max_rate
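    # Illustrative only: a control file that wants to flag tests consuming
    # disk at more than roughly 100 MB/hour could call
    #     job.monitor_disk_usage(100)
    # while passing 0 keeps the default behaviour of no limit.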


    def control_get(self):
        return self.control


    def control_set(self, control):
        self.control = os.path.abspath(control)


    def harness_select(self, which, harness_args):
        self.harness = harness.select(which, self, harness_args)


    def setup_dirs(self, results_dir, tmp_dir):
        if not tmp_dir:
            tmp_dir = os.path.join(self.tmpdir, 'build')
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        if not os.path.isdir(tmp_dir):
            e_msg = "Temp dir (%s) is not a dir - args backwards?" % tmp_dir
            raise ValueError(e_msg)

        # We label the first build "build" and then subsequent ones
        # as "build.2", "build.3", etc. Whilst this is a little bit
        # inconsistent, 99.9% of jobs will only have one build
        # (that's not done as kernbench, sparse, or buildtest),
        # so it works out much cleaner. One of life's compromises.
        if not results_dir:
            results_dir = os.path.join(self.resultdir, 'build')
            i = 2
            while os.path.exists(results_dir):
                results_dir = os.path.join(self.resultdir, 'build.%d' % i)
                i += 1
        if not os.path.exists(results_dir):
            os.mkdir(results_dir)

        return (results_dir, tmp_dir)


    def barrier(self, *args, **kwds):
        """Create a barrier object"""
        return barrier.barrier(*args, **kwds)


    def install_pkg(self, name, pkg_type, install_dir):
        '''
        This method is a simple wrapper around the actual package
        installation method in the Packager class. This is used
        internally by the profilers, deps and tests code.
        name : name of the package (ex: sleeptest, dbench etc.)
        pkg_type : Type of the package (ex: test, dep etc.)
        install_dir : The directory in which the source is actually
                      untarred into. (ex: client/profilers/<name> for profilers)
        '''
        if self.pkgmgr.repositories:
            self.pkgmgr.install_pkg(name, pkg_type, self.pkgdir, install_dir)


    def add_repository(self, repo_urls):
        '''
        Adds the repository locations to the job so that packages
        can be fetched from them when needed. The repository list
        needs to be a string list
        Ex: job.add_repository(['http://blah1','http://blah2'])
        '''
        for repo_url in repo_urls:
            self.pkgmgr.add_repository(repo_url)

        # Fetch the packages' checksum file that contains the checksums
        # of all the packages if it is not already fetched. The checksum
        # is always fetched whenever a job is first started. This
        # is not done in the job's constructor as we don't have the list of
        # the repositories there (and obviously don't care about this file
        # if we are not using the repos)
        try:
            checksum_file_path = os.path.join(self.pkgmgr.pkgmgr_dir,
                                              packages.CHECKSUM_FILE)
            self.pkgmgr.fetch_pkg(packages.CHECKSUM_FILE,
                                  checksum_file_path, use_checksum=False)
        except error.PackageFetchError:
            # packaging system might not be working in this case
            # Silently fall back to the normal case
            pass


    def require_gcc(self):
        """
        Test whether gcc is installed on the machine.
        """
        # check if gcc is installed on the system.
        try:
            utils.system('which gcc')
        except error.CmdError:
            raise NotAvailableError('gcc is required by this job and is '
                                    'not available on the system')


    def setup_dep(self, deps):
        """Set up the dependencies for this test.
        deps is a list of libraries required for this test.
        """
        # Fetch the deps from the repositories and set them up.
        for dep in deps:
            dep_dir = os.path.join(self.autodir, 'deps', dep)
            # Search for the dependency in the repositories if specified,
            # else check locally.
            try:
                self.install_pkg(dep, 'dep', dep_dir)
            except error.PackageInstallError:
                # see if the dep is there locally
                pass

            # dep_dir might not exist if it is not fetched from the repos
            if not os.path.exists(dep_dir):
                raise error.TestError("Dependency %s does not exist" % dep)

            os.chdir(dep_dir)
            # Run the dependency, as it could create more files needed for the
            # tests.
            # In future this might want to be changed, as this always returns
            # None, unless the dep.py errors. In which case, it'll error rather
            # than returning.
            if eval(compile(open('%s.py' % dep, "rb").read(),
                            '%s.py' % dep, 'exec'), {}) is None:
                logging.info('Dependency %s successfully built', dep)
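    # Illustrative sketch (the dep name here is hypothetical): a test calling
    #     job.setup_dep(['somedep'])
    # expects either a 'somedep' package in a configured repository or an
    # existing <autodir>/deps/somedep directory containing somedep.py.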

    def _runtest(self, url, tag, timeout, args, dargs):
        try:
            l = lambda : test.runtest(self, url, tag, args, dargs)
            pid = parallel.fork_start(self.resultdir, l)

            self._forkwait(pid, timeout)

        except error.TestBaseException:
            # These are already classified with an error type (exit_status)
            raise
        except error.JobError:
            raise  # Caught further up and turned into an ABORT.
        except Exception as e:
            # Converts all other exceptions thrown by the test regardless
            # of phase into a TestError(TestBaseException) subclass that
            # reports them with their full stack trace.
            raise error.UnhandledTestError(e)

    def _forkwait(self, pid, timeout=None):
        """Wait for the given pid to complete

        @param pid (int) process id to wait for
        @param timeout (int) seconds to wait before timing out the process"""
        if timeout:
            logging.debug('Waiting for pid %d for %d seconds', pid, timeout)
            parallel.fork_waitfor_timed(self.resultdir, pid, timeout)
        else:
            logging.debug('Waiting for pid %d', pid)
            parallel.fork_waitfor(self.resultdir, pid)
        logging.info('pid %d completed', pid)


    def _run_test_base(self, url, *args, **dargs):
        """
        Prepares arguments and run functions to run_test and run_test_detail.

        @param url A url that identifies the test to run.
        @param tag An optional keyword argument that will be added to the
            test and subdir name.
        @param subdir_tag An optional keyword argument that will be added
            to the subdir name.

        @returns:
                subdir: Test subdirectory
                testname: Test name
                group_func: Actual test run function
                timeout: Test timeout
        """
        _group, testname = self.pkgmgr.get_package_name(url, 'test')
        testname, subdir, tag = self._build_tagged_test_name(testname, dargs)
        self._make_test_outputdir(subdir)

        timeout = dargs.pop('timeout', None)
        if timeout:
            logging.debug('Test has timeout: %d sec.', timeout)

        def log_warning(reason):
            self.record("WARN", subdir, testname, reason)
        @disk_usage_monitor.watch(log_warning, "/", self._max_disk_usage_rate)
        def group_func():
            try:
                self._runtest(url, tag, timeout, args, dargs)
            except error.TestBaseException as detail:
                # The error is already classified, record it properly.
                self.record(detail.exit_status, subdir, testname, str(detail))
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        return (subdir, testname, group_func, timeout)


    @_run_test_complete_on_exit
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        @param url A url that identifies the test to run.
        @param tag An optional keyword argument that will be added to the
            test and subdir name.
        @param subdir_tag An optional keyword argument that will be added
            to the subdir name.

        @returns True if the test passes, False otherwise.
        """
        (subdir, testname, group_func, timeout) = self._run_test_base(url,
                                                                      *args,
                                                                      **dargs)
        try:
            self._rungroup(subdir, testname, group_func, timeout)
            return True
        except error.TestBaseException:
            return False
        # Any other exception here will be given to the caller
        #
        # NOTE: The only exception possible from the control file here
        # is error.JobError as _runtest() turns all others into an
        # UnhandledTestError that is caught above.
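    # Illustrative only: from a client control file this is typically invoked
    # as something like
    #     passed = job.run_test('sleeptest', tag='smoke')
    # where the boolean result reflects whether the test group ended GOOD.
    # (The test name and tag here are examples, not fixed values.)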


    def stage_control_file(self, url):
        """
        Install the test package and return the control file path.

        @param url The name of the test, e.g. login_LoginSuccess.  This is the
            string passed to run_test in the client test control file:
            job.run_test('login_LoginSuccess')
            This name can also be something like 'camera_HAL3.jea',
            which corresponds to a test package containing multiple
            control files, each with calls to:
            job.run_test('camera_HAL3', **opts)

        @returns Absolute path to the control file for the test.
        """
        testname, _, _tag = url.partition('.')
        bindir = os.path.join(self.testdir, testname)
        self.install_pkg(testname, 'test', bindir)
        return _locate_test_control_file(bindir, url)
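    # Illustrative only, reusing the docstring's example name:
    #     control_path = job.stage_control_file('camera_HAL3.jea')
    # installs the camera_HAL3 test package and returns the path of the
    # matching control file under <testdir>/camera_HAL3.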


    @_run_test_complete_on_exit
    def run_test_detail(self, url, *args, **dargs):
        """
        Summon a test object and run it, returning test status.

        @param url A url that identifies the test to run.
        @param tag An optional keyword argument that will be added to the
            test and subdir name.
        @param subdir_tag An optional keyword argument that will be added
            to the subdir name.

        @returns Test status
        @see: client/common_lib/error.py, exit_status
        """
        (subdir, testname, group_func, timeout) = self._run_test_base(url,
                                                                      *args,
                                                                      **dargs)
        try:
            self._rungroup(subdir, testname, group_func, timeout)
            return 'GOOD'
        except error.TestBaseException as detail:
            return detail.exit_status
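    # Illustrative only: unlike run_test, callers branch on the returned
    # status string, e.g.
    #     status = job.run_test_detail('sleeptest')
    #     if status != 'GOOD':
    #         ...  # handle the non-GOOD exit status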


    def _rungroup(self, subdir, testname, function, timeout, *args, **dargs):
        """\
        subdir:
                name of the group
        testname:
                name of the test to run, or support step
        function:
                subroutine to run
        *args:
                arguments for the function

        Returns the result of the passed in function
        """

        try:
            optional_fields = None
            if timeout:
                optional_fields = {}
                optional_fields['timeout'] = timeout
            self.record('START', subdir, testname,
                        optional_fields=optional_fields)

            self._state.set('client', 'unexpected_reboot', (subdir, testname))
            try:
                result = function(*args, **dargs)
                self.record('END GOOD', subdir, testname)
                return result
            except error.TestBaseException as e:
                self.record('END %s' % e.exit_status, subdir, testname)
                raise
            except error.JobError as e:
                self.record('END ABORT', subdir, testname)
                raise
            except Exception as e:
                # This should only ever happen due to a bug in the given
                # function's code.  The common case of being called by
                # run_test() will never reach this.  If a control file called
                # run_group() itself, bugs in its function will be caught
                # here.
                err_msg = str(e) + '\n' + traceback.format_exc()
                self.record('END ERROR', subdir, testname, err_msg)
                raise
        finally:
            self._state.discard('client', 'unexpected_reboot')


    def run_group(self, function, tag=None, **dargs):
        """
        Run a function nested within a group level.

        function:
                Callable to run.
        tag:
                An optional tag name for the group.  If None (default)
                function.__name__ will be used.
        **dargs:
                Named arguments for the function.
        """
        if tag:
            name = tag
        else:
            name = function.__name__

        try:
            return self._rungroup(subdir=None, testname=name,
                                  function=function, timeout=None, **dargs)
        except (SystemExit, error.TestBaseException):
            raise
        # If there was a different exception, turn it into a TestError.
        # It will be caught by step_engine or _run_step_fn.
        except Exception as e:
            raise error.UnhandledTestError(e)
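    # Illustrative sketch (setup_disks is a hypothetical helper defined in a
    # control file):
    #     def setup_disks():
    #         ...
    #     job.run_group(setup_disks, tag='disk_setup')
    # records START/END entries around the helper just like a test group.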


    def cpu_count(self):
        return utils.count_cpus()  # use total system count


    def start_reboot(self):
        self.record('START', None, 'reboot')
        self.record('GOOD', None, 'reboot.start')


    def _record_reboot_failure(self, subdir, operation, status,
                               running_id=None):
        self.record("ABORT", subdir, operation, status)
        if not running_id:
            running_id = utils.running_os_ident()
        kernel = {"kernel": running_id.split("::")[0]}
        self.record("END ABORT", subdir, 'reboot', optional_fields=kernel)


    def _check_post_reboot(self, subdir, running_id=None):
        """
        Perform post-boot checks, such as whether the system configuration
        has changed across reboots (specifically, CPUs and partitions).

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            self._record_reboot_failure(subdir, "reboot.verify_config",
                                        description, running_id=running_id)
            raise error.JobError("Reboot failed: %s" % description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            self._record_reboot_failure(subdir, 'reboot.verify_config',
                                        description, running_id=running_id)
            raise error.JobError('Reboot failed: %s' % description)


    def partition(self, device, loop_size=0, mountpoint=None):
        """
        Work with a machine partition

            @param device: e.g. /dev/sda2, /dev/sdb1 etc...
            @param mountpoint: Specify a directory to mount to. If not
                               specified, the autotest tmp directory will be
                               used.
            @param loop_size: Size of loopback device (in MB). Defaults to 0.

            @return: A L{client.bin.partition.partition} object
        """

        if not mountpoint:
            mountpoint = self.tmpdir
        return partition_lib.partition(self, device, loop_size, mountpoint)
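    # Illustrative only (device path reused from the docstring example):
    #     part = job.partition('/dev/sdb1', mountpoint=job.tmpdir)
    # returns a partition object tied to this job; when mountpoint is omitted
    # the job's tmpdir is used, as implemented above.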

    @utils.deprecated
    def filesystem(self, device, mountpoint=None, loop_size=0):
        """ Same as partition

        @deprecated: Use partition method instead
        """
        return self.partition(device, loop_size, mountpoint)


    def enable_external_logging(self):
        pass


    def disable_external_logging(self):
        pass


    def reboot_setup(self):
        # save the partition list and mount points, as well as the cpu count
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        self._state.set('client', 'mount_info', mount_info)
        self._state.set('client', 'cpu_count', utils.count_cpus())


    def reboot(self):
        self.reboot_setup()
        self.harness.run_reboot()

        # HACK: using this as a module sometimes hangs shutdown, so if it's
        # installed unload it first
        utils.system("modprobe -r netconsole", ignore_status=True)

        # sync first, so that a sync during shutdown doesn't time out
        utils.system("sync; sync", ignore_status=True)

        utils.system("(sleep 5; reboot) </dev/null >/dev/null 2>&1 &")
        self.quit()


    def noop(self, text):
        logging.info("job: noop: " + text)


    @_run_test_complete_on_exit
    def parallel(self, *tasklist, **kwargs):
        """Run tasks in parallel"""

        pids = []
        old_log_filename = self._logger.global_filename
        for i, task in enumerate(tasklist):
            assert isinstance(task, (tuple, list))
            self._logger.global_filename = old_log_filename + (".%d" % i)
            def task_func():
                # stub out _record_indent with a process-local one
                base_record_indent = self._record_indent
                proc_local = self._job_state.property_factory(
                    '_state', '_record_indent.%d' % os.getpid(),
                    base_record_indent, namespace='client')
                self.__class__._record_indent = proc_local
                task[0](*task[1:])
            forked_pid = parallel.fork_start(self.resultdir, task_func)
            logging.info('Just forked pid %d', forked_pid)
            pids.append(forked_pid)

        old_log_path = os.path.join(self.resultdir, old_log_filename)
        old_log = open(old_log_path, "a")
        exceptions = []
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                self._forkwait(pid, kwargs.get('timeout'))
            except Exception as e:
                logging.info('pid %d completed with error', pid)
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self._logger.global_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed in job.parallel" % len(exceptions)
            raise error.JobError(msg)
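    # Illustrative sketch (task_a and task_b are hypothetical callables from a
    # control file); each task is a [callable, *args] list run in its own
    # forked process:
    #     job.parallel([task_a], [task_b, 'some_arg'], timeout=600)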


    def quit(self):
        # XXX: should have a better name.
        self.harness.run_pause()
        raise error.JobContinue("more to come")


    def complete(self, status):
        """Write pending reports, clean up, and exit"""
        # We are about to exit 'complete' so clean up the control file.
        dest = os.path.join(self.resultdir, os.path.basename(self._state_file))
        shutil.move(self._state_file, dest)

        self.harness.run_complete()
        self.disable_external_logging()
        sys.exit(status)


    def _load_state(self):
        # grab any initial state and set up $CONTROL.state as the backing file
        init_state_file = self.control + '.init.state'
        self._state_file = self.control + '.state'
        if os.path.exists(init_state_file):
            shutil.move(init_state_file, self._state_file)
        self._state.set_backing_file(self._state_file)

        # initialize the state engine, if necessary
        has_steps = self._state.has('client', 'steps')
        if not self._is_continuation and has_steps:
            raise RuntimeError('Loaded state can only contain client.steps if '
                               'this is a continuation')

        if not has_steps:
            logging.debug('Initializing the state engine')
            self._state.set('client', 'steps', [])


    def handle_persistent_option(self, options, option_name):
        """
        Select option from command line or persistent state.
        Store selected option to allow standalone client to continue
        after reboot with previously selected options.
        Priority:
        1. explicitly specified via command line
        2. stored in state file (if continuing job '-c')
        3. default == None
        """
        option = None
        cmd_line_option = getattr(options, option_name)
        if cmd_line_option:
            option = cmd_line_option
            self._state.set('client', option_name, option)
        else:
            stored_option = self._state.get('client', option_name, None)
            if stored_option:
                option = stored_option
        logging.debug('Persistent option %s now set to %s', option_name, option)
        return option


    def __create_step_tuple(self, fn, args, dargs):
        # Legacy code passes in an array where the first arg is
        # the function or its name.
        if isinstance(fn, list):
            assert(len(args) == 0)
            assert(len(dargs) == 0)
            args = fn[1:]
            fn = fn[0]
        # Pickling actual functions is hairy, thus we have to call
        # them by name.  Unfortunately, this means only functions
        # defined globally can be used as a next step.
        if callable(fn):
            fn = fn.__name__
        if not isinstance(fn, six.string_types):
            raise StepError("Next steps must be functions or "
                            "strings containing the function name")
        ancestry = copy.copy(self._current_step_ancestry)
        return (ancestry, fn, args, dargs)


    def next_step_append(self, fn, *args, **dargs):
        """Define the next step and place it at the end"""
        steps = self._state.get('client', 'steps')
        steps.append(self.__create_step_tuple(fn, args, dargs))
        self._state.set('client', 'steps', steps)


    def next_step(self, fn, *args, **dargs):
        """Create a new step and place it after any steps added
        while running the current step but before any steps added in
        previous steps"""
        steps = self._state.get('client', 'steps')
        steps.insert(self._next_step_index,
                     self.__create_step_tuple(fn, args, dargs))
        self._next_step_index += 1
        self._state.set('client', 'steps', steps)


    def next_step_prepend(self, fn, *args, **dargs):
        """Insert a new step, executing first"""
        steps = self._state.get('client', 'steps')
        steps.insert(0, self.__create_step_tuple(fn, args, dargs))
        self._next_step_index += 1
        self._state.set('client', 'steps', steps)
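    # Illustrative sketch of the step engine from a control file that needs to
    # survive reboots (the step function names are hypothetical):
    #     def step_init():
    #         job.next_step('step_one')
    #         job.next_step('step_two')
    #     def step_one():
    #         job.run_test('sleeptest')
    #     def step_two():
    #         job.reboot()
    # Steps are stored by function name in the job state file, so only
    # globally defined functions (or their names) can be queued.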



    def _run_step_fn(self, local_vars, fn, args, dargs):
        """Run a (step) function within the given context"""

        local_vars['__args'] = args
        local_vars['__dargs'] = dargs
        try:
            exec('__ret = %s(*__args, **__dargs)' % fn, local_vars, local_vars)
            return local_vars['__ret']
        except SystemExit:
            raise  # Send error.JobContinue and JobComplete on up to runjob.
        except error.TestNAError as detail:
            self.record(detail.exit_status, None, fn, str(detail))
        except Exception as detail:
            raise error.UnhandledJobError(detail)


    def _create_frame(self, global_vars, ancestry, fn_name):
        """Set up the environment like it would have been when this
        function was first defined.

        Child step engine 'implementations' must have 'return locals()'
        at the end of their steps.  Because of this, we can call the
1007*9c5db199SXin Li        parent function and get back all child functions (i.e. those
1008*9c5db199SXin Li        defined within it).
1009*9c5db199SXin Li
1010*9c5db199SXin Li        Unfortunately, the call stack of the function calling
1011*9c5db199SXin Li        job.next_step might have been deeper than the function it
1012*9c5db199SXin Li        added.  In order to make sure that the environment is what it
1013*9c5db199SXin Li        should be, we need to then pop off the frames we built until
1014*9c5db199SXin Li        we find the frame where the function was first defined."""
1015*9c5db199SXin Li
1016*9c5db199SXin Li        # The copies ensure that the parent frames are not modified
1017*9c5db199SXin Li        # while building child frames.  This matters if we then
1018*9c5db199SXin Li        # pop some frames in the next part of this function.
1019*9c5db199SXin Li        current_frame = copy.copy(global_vars)
1020*9c5db199SXin Li        frames = [current_frame]
1021*9c5db199SXin Li        for steps_fn_name in ancestry:
1022*9c5db199SXin Li            ret = self._run_step_fn(current_frame, steps_fn_name, [], {})
1023*9c5db199SXin Li            current_frame = copy.copy(ret)
1024*9c5db199SXin Li            frames.append(current_frame)
1025*9c5db199SXin Li
1026*9c5db199SXin Li        # Walk up the stack frames until we find the place fn_name was defined.
1027*9c5db199SXin Li        while len(frames) > 2:
1028*9c5db199SXin Li            if fn_name not in frames[-2]:
1029*9c5db199SXin Li                break
1030*9c5db199SXin Li            if frames[-2][fn_name] != frames[-1][fn_name]:
1031*9c5db199SXin Li                break
1032*9c5db199SXin Li            frames.pop()
1033*9c5db199SXin Li            ancestry.pop()
1034*9c5db199SXin Li
1035*9c5db199SXin Li        return (frames[-1], ancestry)
1036*9c5db199SXin Li
1037*9c5db199SXin Li
1038*9c5db199SXin Li    def _add_step_init(self, local_vars, current_function):
1039*9c5db199SXin Li        """If the function returned a dictionary that includes a
1040*9c5db199SXin Li        function named 'step_init', prepend it to our list of steps.
1041*9c5db199SXin Li        This will only get run the first time a function with a nested
1042*9c5db199SXin Li        use of the step engine is run."""
1043*9c5db199SXin Li
1044*9c5db199SXin Li        if (isinstance(local_vars, dict) and
1045*9c5db199SXin Li            'step_init' in local_vars and
1046*9c5db199SXin Li            callable(local_vars['step_init'])):
1047*9c5db199SXin Li            # The init step is a child of the function
1048*9c5db199SXin Li            # we were just running.
1049*9c5db199SXin Li            self._current_step_ancestry.append(current_function)
1050*9c5db199SXin Li            self.next_step_prepend('step_init')
1051*9c5db199SXin Li
1052*9c5db199SXin Li
1053*9c5db199SXin Li    def step_engine(self):
1054*9c5db199SXin Li        """The multi-run engine used when the control file defines step_init.
1055*9c5db199SXin Li
1056*9c5db199SXin Li        Does the next step.
1057*9c5db199SXin Li        """
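        # Minimal sketch of a step-based control file (hypothetical test
        # names); the queue of remaining steps is persisted in the .state
        # file, so execution resumes at the next step after the reboot:
        #
        #     def step_init():
        #         job.next_step('step_two')
        #         job.run_test('example_BeforeReboot')
        #         job.reboot()
        #
        #     def step_two():
        #         job.run_test('example_AfterReboot')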
1058*9c5db199SXin Li
1059*9c5db199SXin Li        # Set up the environment and then interpret the control file.
1060*9c5db199SXin Li        # Some control files will have code outside of functions,
1061*9c5db199SXin Li        # which means we need to have our state engine initialized
1062*9c5db199SXin Li        # before reading in the file.
1063*9c5db199SXin Li        global_control_vars = {'job': self,
1064*9c5db199SXin Li                               'args': self.args}
1065*9c5db199SXin Li        exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
1066*9c5db199SXin Li        try:
1067*9c5db199SXin Li            exec(compile(open(self.control, "rb").read(), self.control, 'exec'),
1068*9c5db199SXin Li                 global_control_vars, global_control_vars)
1069*9c5db199SXin Li        except error.TestNAError as detail:
1070*9c5db199SXin Li            self.record(detail.exit_status, None, self.control, str(detail))
1071*9c5db199SXin Li        except SystemExit:
1072*9c5db199SXin Li            raise  # Send error.JobContinue and JobComplete on up to runjob.
1073*9c5db199SXin Li        except Exception as detail:
1074*9c5db199SXin Li            # Syntax errors or other general Python exceptions coming out of
1075*9c5db199SXin Li            # the top level of the control file itself go through here.
1076*9c5db199SXin Li            raise error.UnhandledJobError(detail)
1077*9c5db199SXin Li
1078*9c5db199SXin Li        # If we loaded in a mid-job state file, then we presumably
1079*9c5db199SXin Li        # know what steps we have yet to run.
1080*9c5db199SXin Li        if not self._is_continuation:
1081*9c5db199SXin Li            if 'step_init' in global_control_vars:
1082*9c5db199SXin Li                self.next_step(global_control_vars['step_init'])
1083*9c5db199SXin Li        else:
1084*9c5db199SXin Li            # If the last job failed due to an unexpected reboot, record it
1085*9c5db199SXin Li            # as a failure so the harness gets called.
1086*9c5db199SXin Li            last_job = self._state.get('client', 'unexpected_reboot', None)
1087*9c5db199SXin Li            if last_job:
1088*9c5db199SXin Li                subdir, testname = last_job
1089*9c5db199SXin Li                self.record('FAIL', subdir, testname, 'unexpected reboot')
1090*9c5db199SXin Li                self.record('END FAIL', subdir, testname)
1091*9c5db199SXin Li
1092*9c5db199SXin Li        # Iterate through the steps.  If we reboot, we'll simply
1093*9c5db199SXin Li        # continue iterating on the next step.
1094*9c5db199SXin Li        while len(self._state.get('client', 'steps')) > 0:
1095*9c5db199SXin Li            steps = self._state.get('client', 'steps')
1096*9c5db199SXin Li            (ancestry, fn_name, args, dargs) = steps.pop(0)
1097*9c5db199SXin Li            self._state.set('client', 'steps', steps)
1098*9c5db199SXin Li
1099*9c5db199SXin Li            self._next_step_index = 0
1100*9c5db199SXin Li            ret = self._create_frame(global_control_vars, ancestry, fn_name)
1101*9c5db199SXin Li            local_vars, self._current_step_ancestry = ret
1102*9c5db199SXin Li            local_vars = self._run_step_fn(local_vars, fn_name, args, dargs)
1103*9c5db199SXin Li            self._add_step_init(local_vars, fn_name)
1104*9c5db199SXin Li
1105*9c5db199SXin Li
1106*9c5db199SXin Li    def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
1107*9c5db199SXin Li        self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
1108*9c5db199SXin Li                                   on_every_test)
1109*9c5db199SXin Li
1110*9c5db199SXin Li
1111*9c5db199SXin Li    def add_sysinfo_logfile(self, file, on_every_test=False):
1112*9c5db199SXin Li        self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
1113*9c5db199SXin Li
1114*9c5db199SXin Li
1115*9c5db199SXin Li    def _add_sysinfo_loggable(self, loggable, on_every_test):
1116*9c5db199SXin Li        if on_every_test:
1117*9c5db199SXin Li            self.sysinfo.test_loggables.add(loggable)
1118*9c5db199SXin Li        else:
1119*9c5db199SXin Li            self.sysinfo.boot_loggables.add(loggable)
1120*9c5db199SXin Li        self._save_sysinfo_state()
1121*9c5db199SXin Li
1122*9c5db199SXin Li
1123*9c5db199SXin Li    def _load_sysinfo_state(self):
1124*9c5db199SXin Li        state = self._state.get('client', 'sysinfo', None)
1125*9c5db199SXin Li        if state:
1126*9c5db199SXin Li            self.sysinfo.deserialize(state)
1127*9c5db199SXin Li
1128*9c5db199SXin Li
1129*9c5db199SXin Li    def _save_sysinfo_state(self):
1130*9c5db199SXin Li        state = self.sysinfo.serialize()
1131*9c5db199SXin Li        self._state.set('client', 'sysinfo', state)
1132*9c5db199SXin Li
1133*9c5db199SXin Li
1134*9c5db199SXin Liclass disk_usage_monitor:
1135*9c5db199SXin Li    def __init__(self, logging_func, device, max_mb_per_hour):
1136*9c5db199SXin Li        self.func = logging_func
1137*9c5db199SXin Li        self.device = device
1138*9c5db199SXin Li        self.max_mb_per_hour = max_mb_per_hour
1139*9c5db199SXin Li
1140*9c5db199SXin Li
1141*9c5db199SXin Li    def start(self):
1142*9c5db199SXin Li        self.initial_space = utils.freespace(self.device)
1143*9c5db199SXin Li        self.start_time = time.time()
1144*9c5db199SXin Li
1145*9c5db199SXin Li
1146*9c5db199SXin Li    def stop(self):
1147*9c5db199SXin Li        # if no maximum usage rate was set, we don't need to
1148*9c5db199SXin Li        # generate any warnings
1149*9c5db199SXin Li        if not self.max_mb_per_hour:
1150*9c5db199SXin Li            return
1151*9c5db199SXin Li
1152*9c5db199SXin Li        final_space = utils.freespace(self.device)
1153*9c5db199SXin Li        used_space = self.initial_space - final_space
1154*9c5db199SXin Li        stop_time = time.time()
1155*9c5db199SXin Li        total_time = stop_time - self.start_time
1156*9c5db199SXin Li        # round up the time to one minute, to keep extremely short
1157*9c5db199SXin Li        # tests from generating false positives due to short, badly
1158*9c5db199SXin Li        # timed bursts of activity
1159*9c5db199SXin Li        total_time = max(total_time, 60.0)
1160*9c5db199SXin Li
1161*9c5db199SXin Li        # determine the usage rate
1162*9c5db199SXin Li        bytes_per_sec = used_space / total_time
1163*9c5db199SXin Li        mb_per_sec = bytes_per_sec / 1024**2
1164*9c5db199SXin Li        mb_per_hour = mb_per_sec * 60 * 60
1165*9c5db199SXin Li
1166*9c5db199SXin Li        if mb_per_hour > self.max_mb_per_hour:
1167*9c5db199SXin Li            msg = ("disk space on %s was consumed at a rate of %.2f MB/hour")
1168*9c5db199SXin Li            msg %= (self.device, mb_per_hour)
1169*9c5db199SXin Li            self.func(msg)
1170*9c5db199SXin Li
1171*9c5db199SXin Li
1172*9c5db199SXin Li    @classmethod
1173*9c5db199SXin Li    def watch(cls, *monitor_args, **monitor_dargs):
1174*9c5db199SXin Li        """ Generic decorator to wrap a function call with the
1175*9c5db199SXin Li        standard create-monitor -> start -> call -> stop idiom."""
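        # Hypothetical usage (names are illustrative, not from this module):
        #
        #     @disk_usage_monitor.watch(logging.warning, '/', max_mb_per_hour=50)
        #     def run_phase():
        #         ...  # disk-heavy work
        #
        # The wrapped call is bracketed by start()/stop(), so a usage rate
        # above the threshold is reported through the supplied logging
        # function.  Note that the wrapper discards the wrapped function's
        # return value.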
1176*9c5db199SXin Li        def decorator(func):
1177*9c5db199SXin Li            def watched_func(*args, **dargs):
1178*9c5db199SXin Li                monitor = cls(*monitor_args, **monitor_dargs)
1179*9c5db199SXin Li                monitor.start()
1180*9c5db199SXin Li                try:
1181*9c5db199SXin Li                    func(*args, **dargs)
1182*9c5db199SXin Li                finally:
1183*9c5db199SXin Li                    monitor.stop()
1184*9c5db199SXin Li            return watched_func
1185*9c5db199SXin Li        return decorator
1186*9c5db199SXin Li
1187*9c5db199SXin Li
1188*9c5db199SXin Lidef runjob(control, drop_caches, options):
1189*9c5db199SXin Li    """
1190*9c5db199SXin Li    Run a job using the given control file.
1191*9c5db199SXin Li
1192*9c5db199SXin Li    This is the main interface to this module.
1193*9c5db199SXin Li
1194*9c5db199SXin Li    @see base_job.__init__ for parameter info.
1195*9c5db199SXin Li    """
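    # Exit status convention (mirrors the handlers below): error.JobContinue
    # exits with 5, signalling the caller to re-run autotest with --continue
    # (e.g. after a step-engine reboot); error.JobComplete and unrecoverable
    # errors exit with 1.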
1196*9c5db199SXin Li    control = os.path.abspath(control)
1197*9c5db199SXin Li    state = control + '.state'
1198*9c5db199SXin Li    # Ensure state file is cleaned up before the job starts to run if autotest
1199*9c5db199SXin Li    # is not running with the --continue flag
1200*9c5db199SXin Li    if not options.cont and os.path.isfile(state):
1201*9c5db199SXin Li        logging.debug('Cleaning up previously found state file')
1202*9c5db199SXin Li        os.remove(state)
1203*9c5db199SXin Li
1204*9c5db199SXin Li    # instantiate the job object ready for the control file.
1205*9c5db199SXin Li    myjob = None
1206*9c5db199SXin Li    try:
1207*9c5db199SXin Li        # Check that the control file is valid
1208*9c5db199SXin Li        if not os.path.exists(control):
1209*9c5db199SXin Li            raise error.JobError(control + ": control file not found")
1210*9c5db199SXin Li
1211*9c5db199SXin Li        # When continuing, the job is complete when there is no
1212*9c5db199SXin Li        # state file; ensure we don't try to continue.
1213*9c5db199SXin Li        if options.cont and not os.path.exists(state):
1214*9c5db199SXin Li            raise error.JobComplete("all done")
1215*9c5db199SXin Li
1216*9c5db199SXin Li        myjob = job(control=control, drop_caches=drop_caches, options=options)
1217*9c5db199SXin Li
1218*9c5db199SXin Li        # Load in the user's control file; it may do any one of:
1219*9c5db199SXin Li        #  1) execute in toto
1220*9c5db199SXin Li        #  2) define steps, and select the first via next_step()
1221*9c5db199SXin Li        myjob.step_engine()
1222*9c5db199SXin Li
1223*9c5db199SXin Li    except error.JobContinue:
1224*9c5db199SXin Li        sys.exit(5)
1225*9c5db199SXin Li
1226*9c5db199SXin Li    except error.JobComplete:
1227*9c5db199SXin Li        sys.exit(1)
1228*9c5db199SXin Li
1229*9c5db199SXin Li    except error.JobError as instance:
1230*9c5db199SXin Li        logging.error("JOB ERROR: " + str(instance))
1231*9c5db199SXin Li        if myjob:
1232*9c5db199SXin Li            command = None
1233*9c5db199SXin Li            if len(instance.args) > 1:
1234*9c5db199SXin Li                command = instance.args[1]
1235*9c5db199SXin Li                myjob.record('ABORT', None, command, str(instance))
1236*9c5db199SXin Li            myjob.record('END ABORT', None, None, str(instance))
1237*9c5db199SXin Li            assert myjob._record_indent == 0
1238*9c5db199SXin Li            myjob.complete(1)
1239*9c5db199SXin Li        else:
1240*9c5db199SXin Li            sys.exit(1)
1241*9c5db199SXin Li
1242*9c5db199SXin Li    except Exception as e:
1243*9c5db199SXin Li        # NOTE: job._run_step_fn and job.step_engine will turn things into
1244*9c5db199SXin Li        # a JobError for us.  If we get here, it's likely an autotest bug.
1245*9c5db199SXin Li        msg = str(e) + '\n' + traceback.format_exc()
1246*9c5db199SXin Li        logging.critical("JOB ERROR (autotest bug?): " + msg)
1247*9c5db199SXin Li        if myjob:
1248*9c5db199SXin Li            myjob.record('END ABORT', None, None, msg)
1249*9c5db199SXin Li            assert myjob._record_indent == 0
1250*9c5db199SXin Li            myjob.complete(1)
1251*9c5db199SXin Li        else:
1252*9c5db199SXin Li            sys.exit(1)
1253*9c5db199SXin Li
1254*9c5db199SXin Li    # If we get here, then we assume the job is complete and good.
1255*9c5db199SXin Li    myjob.record('END GOOD', None, None)
1256*9c5db199SXin Li    assert myjob._record_indent == 0
1257*9c5db199SXin Li
1258*9c5db199SXin Li    myjob.complete(0)
1259*9c5db199SXin Li
1260*9c5db199SXin Li
1261*9c5db199SXin Liclass job(base_client_job):
1262*9c5db199SXin Li
1263*9c5db199SXin Li    def __init__(self, *args, **kwargs):
1264*9c5db199SXin Li        base_client_job.__init__(self, *args, **kwargs)
1265*9c5db199SXin Li
1266*9c5db199SXin Li
1267*9c5db199SXin Li    def run_test(self, url, *args, **dargs):
1268*9c5db199SXin Li        log_pauser = cros_logging.LogRotationPauser()
1269*9c5db199SXin Li        passed = False
1270*9c5db199SXin Li        try:
1271*9c5db199SXin Li            log_pauser.begin()
1272*9c5db199SXin Li            passed = base_client_job.run_test(self, url, *args, **dargs)
1273*9c5db199SXin Li            if not passed:
1274*9c5db199SXin Li                # Save the VM state immediately after the test failure.
1275*9c5db199SXin Li                # This is a NOOP if the test isn't running in a VM or
1276*9c5db199SXin Li                # if the VM is not properly configured to save state.
1277*9c5db199SXin Li                _group, testname = self.pkgmgr.get_package_name(url, 'test')
1278*9c5db199SXin Li                now = datetime.now().strftime('%I:%M:%S.%f')
1279*9c5db199SXin Li                checkpoint_name = '%s-%s' % (testname, now)
1280*9c5db199SXin Li                utils.save_vm_state(checkpoint_name)
1281*9c5db199SXin Li        finally:
1282*9c5db199SXin Li            log_pauser.end()
1283*9c5db199SXin Li        return passed
1284*9c5db199SXin Li
1285*9c5db199SXin Li
1286*9c5db199SXin Li    def reboot(self):
1287*9c5db199SXin Li        self.reboot_setup()
1288*9c5db199SXin Li        self.harness.run_reboot()
1289*9c5db199SXin Li
1290*9c5db199SXin Li        # sync first, so that a sync during shutdown doesn't time out
1291*9c5db199SXin Li        utils.system('sync; sync', ignore_status=True)
1292*9c5db199SXin Li
1293*9c5db199SXin Li        utils.system('reboot </dev/null >/dev/null 2>&1 &')
1294*9c5db199SXin Li        self.quit()
1295*9c5db199SXin Li
1296*9c5db199SXin Li
1297*9c5db199SXin Li    def require_gcc(self):
1298*9c5db199SXin Li        return False
1299*9c5db199SXin Li
1300*9c5db199SXin Li
1301*9c5db199SXin Li# TODO(ayatane): This logic should be deduplicated with
1302*9c5db199SXin Li# server/cros/dynamic_suite/control_file_getter.py, but the server
1303*9c5db199SXin Li# libraries are not available on clients.
1304*9c5db199SXin Lidef _locate_test_control_file(dirpath, testname):
1305*9c5db199SXin Li    """
1306*9c5db199SXin Li    Locate the control file for the given test.
1307*9c5db199SXin Li
1308*9c5db199SXin Li    @param dirpath Root directory to search.
1309*9c5db199SXin Li    @param testname Name of test.
1310*9c5db199SXin Li
1311*9c5db199SXin Li    @returns Absolute path to the control file.
1312*9c5db199SXin Li    @raise JobError: Raised if control file not found.
1313*9c5db199SXin Li    """
1314*9c5db199SXin Li    for dirpath, _dirnames, filenames in os.walk(dirpath):
1315*9c5db199SXin Li        for filename in filenames:
1316*9c5db199SXin Li            if 'control' not in filename:
1317*9c5db199SXin Li                continue
1318*9c5db199SXin Li            path = os.path.join(dirpath, filename)
1319*9c5db199SXin Li            if _is_control_file_for_test(path, testname):
1320*9c5db199SXin Li                return os.path.abspath(path)
1321*9c5db199SXin Li    raise error.JobError(
1322*9c5db199SXin Li            'could not find client test control file',
1323*9c5db199SXin Li            dirpath, testname)
1324*9c5db199SXin Li
1325*9c5db199SXin Li
1326*9c5db199SXin Li_NAME_PATTERN = "NAME *= *['\"]([^'\"]+)['\"]"
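# A control file line such as
#     NAME = "example_TestName"
# (hypothetical name) matches; the captured group is then compared against
# the requested testname.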
1327*9c5db199SXin Li
1328*9c5db199SXin Li
1329*9c5db199SXin Lidef _is_control_file_for_test(path, testname):
1330*9c5db199SXin Li    with open(path) as f:
1331*9c5db199SXin Li        for line in f:
1332*9c5db199SXin Li            match = re.match(_NAME_PATTERN, line)
1333*9c5db199SXin Li            if match is not None:
1334*9c5db199SXin Li                return match.group(1) == testname
1335