xref: /aosp_15_r20/external/autotest/server/cros/telemetry_runner.py (revision 9c5db1993ded3edbeafc8092d69fe5de2ee02df7)
1# Lint as: python3
2# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6from __future__ import absolute_import
7from __future__ import division
8from __future__ import print_function
9
10import abc
11import json
12import logging
13import numbers
14import os
15import tempfile
16import six
17import sys
18
19import numpy
20
21import common
22from autotest_lib.client.common_lib import error, utils
23from autotest_lib.server.cros import telemetry_setup
24
25TELEMETRY_RUN_BENCHMARKS_SCRIPT = 'tools/perf/run_benchmark'
26TELEMETRY_RUN_TESTS_SCRIPT = 'tools/telemetry/run_tests'
27TELEMETRY_RUN_GPU_TESTS_SCRIPT = 'content/test/gpu/run_gpu_integration_test.py'
28TELEMETRY_TIMEOUT_MINS = 150
29
30DUT_CHROME_ROOT = '/usr/local/telemetry/src'
31
32CHART_JSON_RESULT = 'results-chart.json'
33HISTOGRAM_SET_RESULT = 'histograms.json'
34PROFILE_ARTIFACTS = 'artifacts'
35
36# Result Statuses
37SUCCESS_STATUS = 'SUCCESS'
38WARNING_STATUS = 'WARNING'
39FAILED_STATUS = 'FAILED'
40
41# A list of telemetry tests that cannot run on dut.
42ON_DUT_BLOCKLIST = [
43        'loading.desktop',  # crbug/882299
44        'rendering.desktop',  # crbug/882291
45]
46
47
48class TelemetryResult(object):
49    """Class to represent the results of a telemetry run.
50
51    This class represents the results of a telemetry run, whether it ran
52    successfully, failed or had warnings.
53    """
54
55    def __init__(self, exit_code=0, stdout='', stderr=''):
56        """Initializes this TelemetryResultObject instance.
57
58        @param exit_code: Exit code of the telemetry run; 0 means success.
59        @param stdout: Stdout of the telemetry run.
60        @param stderr: Stderr of the telemetry run.
61        """
62        if exit_code == 0:
63            self.status = SUCCESS_STATUS
64        else:
65            self.status = FAILED_STATUS
66
67        self._stdout = stdout
68        self._stderr = stderr
69        self.output = '\n'.join([stdout, stderr])
70
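# Illustrative sketch (not part of the original module): how an exit code maps
# to a TelemetryResult status. The values below are hypothetical.
#
#   result = TelemetryResult(exit_code=1, stdout='', stderr='timed out')
#   assert result.status == FAILED_STATUS
#   assert TelemetryResult(exit_code=0).status == SUCCESS_STATUS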
71
72class TelemetryRunnerFactory(object):
73    """A factory class to determine TelemetryRunner subclass to be used.
74
75    The TelemetryRunner class currently has several ways to execute a telemetry
76    test. The test can be executed locally (using a tool like test_that) or in
77    the lab environment - in which case either the drone or the devserver is
78    used.
79
80    A factory class offloads this determination from the clients. Users of the
81    TelemetryRunner class are highly encouraged to go through this factory
82    class when determining the correct TelemetryRunner subclass.
83    """
84
85    def get_runner(self,
86                   host,
87                   local=False,
88                   telemetry_on_dut=True,
89                   is_lacros=False):
90        """Method to determine which TelemetryRunner subclass to use."""
91        if local:
92            return LocalTelemetryRunner(host, telemetry_on_dut)
93        else:
94            return DroneTelemetryRunner(host, telemetry_on_dut, is_lacros)
95
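# Illustrative sketch (not part of the original module): obtaining a runner via
# the factory. `host` is assumed to be an autotest host object provided by the
# calling test; local=True selects LocalTelemetryRunner, otherwise
# DroneTelemetryRunner is used.
#
#   runner = TelemetryRunnerFactory().get_runner(host, local=False,
#                                                telemetry_on_dut=True)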
96
97class TelemetryRunner(six.with_metaclass(abc.ABCMeta, object)):
98    """Class responsible for telemetry for a given build.
99
100    This class will extract and install telemetry environment and is
101    responsible for executing the telemetry benchmarks and returning their
102    output to the caller.
103    """
104
105    def __init__(self, host, telemetry_on_dut=True, is_lacros=False):
106        """Initializes this telemetry runner instance.
107
108        If telemetry is not installed for this build, it will be installed.
109
110        @param host: Host where the test will be run.
111        @param telemetry_on_dut: If set, telemetry itself (the test harness)
112                                 runs on the DUT. This determines
113                                 browser=[system|cros-chrome].
114        @param is_lacros: If True, run telemetry against lacros-chrome by
115                          passing browser=lacros-chrome. Only valid in remote
116                          test mode.
117        """
118        self._host = host
119        self._telemetry_path = None
120        self._perf_value_writer = None
121        self._setup_telemetry()
122        self._telemetry_on_dut = telemetry_on_dut
123        self._benchmark_deps = None
124        self._is_lacros = is_lacros
125        logging.debug('Telemetry Path: %s', self._telemetry_path)
126
127    def __enter__(self):
128        """Called while entering context manager; does nothing."""
129        return self
130
131    def __exit__(self, exc_type, exc_value, traceback):
132        """Called while exiting context manager."""
133
134    @abc.abstractmethod
135    def _setup_telemetry(self):
136        """Set up telemetry environment."""
137
138    def _get_telemetry_cmd(self, script, test_or_benchmark, output_format,
139                           *args, **kwargs):
140        """Build command to execute telemetry based on script and benchmark.
141
142        @param script: Telemetry script we want to run. For example:
143                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
144        @param test_or_benchmark: Name of the test or benchmark we want to run,
145                                  with the page_set (if required) as part of
146                                  the string.
147        @param output_format: Format of the json result file: 'histograms' or
148                              'chartjson'.
149        @param args: additional list of arguments to pass to the script.
150        @param kwargs: additional list of keyword arguments to pass to the
151                       script.
152
153        @returns Full telemetry command to execute the script.
154        """
155        telemetry_cmd = []
156        no_verbose = kwargs.get('no_verbose', False)
157
158        output_dir = (DUT_CHROME_ROOT
159                      if self._telemetry_on_dut else self._telemetry_path)
160        # Create a temp directory to hold a single test run.
161        if self._perf_value_writer:
162            output_dir = os.path.join(
163                    output_dir, self._perf_value_writer.tmpdir.strip('/'))
164
165        if self._telemetry_on_dut:
166            telemetry_cmd.extend([
167                    self._host.ssh_command(alive_interval=900,
168                                           connection_attempts=4),
169                    sys.executable,
170                    script,
171                    '--output-format=%s' % output_format,
172                    '--output-dir=%s' % output_dir,
173                    '--browser=system',
174            ])
175        else:
176            browser = 'lacros-chrome' if self._is_lacros else 'cros-chrome'
177            telemetry_cmd.extend([
178                    sys.executable,
179                    script,
180                    '--browser=%s' % browser,
181                    '--output-format=%s' % output_format,
182                    '--output-dir=%s' % output_dir,
183                    '--remote=%s' % self._host.hostname,
184            ])
185            if self._host.host_port != self._host.hostname and self._host.host_port:
186                # If the user specifies a different port for the DUT, we
187                # should use a different telemetry argument to set it up.
188                #
189                # E.g. when the user is running experiments with ssh port
190                # forwarding and specifies remote as 127.0.0.1:2222, then
191                # host_port is 127.0.0.1:2222, hostname is 127.0.0.1 and
192                # port is 2222.
193                telemetry_cmd.append('--remote-ssh-port=%s' % self._host.port)
194
195        if not no_verbose:
196            telemetry_cmd.append('--verbose')
197        telemetry_cmd.extend(args)
198        telemetry_cmd.append(test_or_benchmark)
199
200        return ' '.join(telemetry_cmd)
201
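    # Illustrative sketch (not part of the original module): with
    # telemetry_on_dut=False the assembled command looks roughly like the
    # following (hostname and benchmark name are hypothetical):
    #
    #   python run_benchmark --browser=cros-chrome --output-format=histograms \
    #       --output-dir=<telemetry_path> --remote=<dut hostname> --verbose octane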
202    def _scp_telemetry_results_cmd(self, perf_results_dir, output_format,
203                                   artifacts):
204        """Build command to copy the telemetry results from the work directory.
205
206        @param perf_results_dir: directory path where test output is to be
207                                 collected.
208        @param output_format: Format of the json result file: 'histograms' or
209                              'chartjson'.
210        @param artifacts: Whether we want to copy artifacts directory.
211
212        @returns SCP command to copy the results json to the specified
213                 directory.
214        """
215        if not perf_results_dir:
216            return ''
217
218        output_filename = CHART_JSON_RESULT
219        if output_format == 'histograms':
220            output_filename = HISTOGRAM_SET_RESULT
221        scp_cmd = []
222        if self._telemetry_on_dut:
223            scp_cmd.extend(['scp', '-r'])
224            scp_cmd.append(
225                    self._host.make_ssh_options(
226                            alive_interval=900, connection_attempts=4))
227            if not self._host.is_default_port:
228                scp_cmd.append('-P %d' % self._host.port)
229            src = 'root@%s:%s' % (self._host.hostname, DUT_CHROME_ROOT)
230        else:
231            # Use rsync --remove-source-files to move rather than copy from
232            # the work dir. Each run generates certain artifacts that are not
233            # removed afterwards, so the result size would keep growing
234            # otherwise. We don't do this for results on the DUT because
235            # 1) rsync doesn't work there and 2) the DUT is reflashed
236            # frequently, so result size is not a concern.
237            scp_cmd.extend(['rsync', '-avz', '--remove-source-files'])
238            src = self._telemetry_path
239
240        if self._perf_value_writer:
241            src = os.path.join(src, self._perf_value_writer.tmpdir.strip('/'))
242
243        scp_cmd.append(os.path.join(src, output_filename))
244
245        # Copy artifacts back to result directory if needed.
246        if artifacts:
247            scp_cmd.append(os.path.join(src, PROFILE_ARTIFACTS))
248
249        scp_cmd.append(perf_results_dir)
250        return ' '.join(scp_cmd)
251
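    # Illustrative sketch (not part of the original module): the copy command
    # built above, for each mode (paths are hypothetical):
    #
    #   scp -r <ssh options> root@<dut>:/usr/local/telemetry/src/histograms.json <results dir>
    #   rsync -avz --remove-source-files <telemetry_path>/histograms.json <results dir>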
252    def _run_cmd(self, cmd):
253        """Execute an command in a external shell and capture the output.
254
255        @param cmd: String that is a valid shell command.
256
257        @returns The standard out, standard error and the integer exit code of
258                 the executed command.
259        """
260        logging.debug('Running: %s', cmd)
261
262        output = six.StringIO()
263        error_output = six.StringIO()
264        exit_code = 0
265        try:
266            result = utils.run(
267                    cmd,
268                    stdout_tee=output,
269                    stderr_tee=error_output,
270                    timeout=TELEMETRY_TIMEOUT_MINS * 60)
271            exit_code = result.exit_status
272        except error.CmdError as e:
273            logging.debug('Error occurred executing command: %s', e)
274            exit_code = e.result_obj.exit_status
275
276        stdout = output.getvalue()
277        stderr = error_output.getvalue()
278        logging.debug('Completed with exit code: %d.\nstdout:%s\n'
279                      'stderr:%s', exit_code, stdout, stderr)
280        return stdout, stderr, exit_code
281
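    # Illustrative sketch (not part of the original module): _run_cmd shells
    # out through autotest's utils.run and never raises on a non-zero exit;
    # callers inspect the returned exit code instead.
    #
    #   stdout, stderr, exit_code = self._run_cmd('true')
    #   # exit_code is 0 here; a CmdError is caught and its exit status returned.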
282    def _run_telemetry(self, script, test_or_benchmark, output_format, *args,
283                       **kwargs):
284        """Runs telemetry on a dut.
285
286        @param script: Telemetry script we want to run. For example:
287                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
288        @param test_or_benchmark: Name of the test or benchmark we want to run,
289                                 with the page_set (if required) as part of the
290                                 string.
291        @param args: additional list of arguments to pass to the script.
292        @param kwargs: additional list of keyword arguments to pass to the
293                       script.
294
295        @returns A TelemetryResult Instance with the results of this telemetry
296                 execution.
297        """
298        # TODO (sbasi crbug.com/239933) add support for incognito mode.
299
300        telemetry_cmd = self._get_telemetry_cmd(script, test_or_benchmark,
301                                                output_format, *args, **kwargs)
302        logging.info('Running Telemetry: %s', telemetry_cmd)
303
304        stdout, stderr, exit_code = self._run_cmd(telemetry_cmd)
305
306        return TelemetryResult(
307                exit_code=exit_code, stdout=stdout, stderr=stderr)
308
309    def _run_scp(self, perf_results_dir, output_format, artifacts=False):
310        """Runs telemetry on a dut.
311
312        @param perf_results_dir: The local directory where results are
313                                 collected.
314        @param output_format: Format of the json result file.
315        @param artifacts: Whether we want to copy artifacts directory.
316        """
317        scp_cmd = self._scp_telemetry_results_cmd(perf_results_dir,
318                                                  output_format, artifacts)
319        logging.debug('Retrieving Results: %s', scp_cmd)
320        _, _, exit_code = self._run_cmd(scp_cmd)
321        if exit_code != 0:
322            raise error.TestFail('Unable to retrieve results.')
323
324        if output_format == 'histograms':
325            # Converts to chart json format.
326            input_filename = os.path.join(perf_results_dir,
327                                          HISTOGRAM_SET_RESULT)
328            output_filename = os.path.join(perf_results_dir, CHART_JSON_RESULT)
329            histograms = json.loads(open(input_filename).read())
330            chartjson = TelemetryRunner.convert_chart_json(histograms)
331            with open(output_filename, 'w') as fout:
332                fout.write(json.dumps(chartjson, indent=2))
333
334    def _run_test(self, script, test, *args):
335        """Runs a telemetry test on a dut.
336
337        @param script: Which telemetry test script we want to run. Can be
338                       telemetry's base test script or the ChromeOS specific
339                       test script.
340        @param test: Telemetry test we want to run.
341        @param args: additional list of arguments to pass to the script.
342
343        @returns A TelemetryResult Instance with the results of this telemetry
344                 execution.
345        """
346        logging.debug('Running telemetry test: %s', test)
347        telemetry_script = os.path.join(self._telemetry_path, script)
348        result = self._run_telemetry(telemetry_script, test, 'chartjson',
349                                     *args)
350        if result.status == FAILED_STATUS:
351            raise error.TestFail('Telemetry test %s failed.' % test)
352        return result
353
354    def run_telemetry_test(self, test, *args):
355        """Runs a telemetry test on a dut.
356
357        @param test: Telemetry test we want to run.
358        @param args: additional list of arguments to pass to the telemetry
359                     execution script.
360
361        @returns A TelemetryResult Instance with the results of this telemetry
362                 execution.
363        """
364        return self._run_test(TELEMETRY_RUN_TESTS_SCRIPT, test, *args)
365
366    def run_telemetry_benchmark(self,
367                                benchmark,
368                                perf_value_writer=None,
369                                *args,
370                                **kwargs):
371        """Runs a telemetry benchmark on a dut.
372
373        @param benchmark: Benchmark we want to run.
374        @param perf_value_writer: Should be an instance with the function
375                                  output_perf_value(), if None, no perf value
376                                  will be written. Typically this will be the
377                                  job object from an autotest test.
378        @param args: additional list of arguments to pass to the telemetry
379                     execution script.
380        @param kwargs: additional list of keyword arguments to pass to the
381                       telemetry execution script.
382
383        @returns A TelemetryResult Instance with the results of this telemetry
384                 execution.
385        """
386        logging.debug('Running telemetry benchmark: %s', benchmark)
387
388        self._perf_value_writer = perf_value_writer
389
390        if benchmark in ON_DUT_BLOCKLIST:
391            self._telemetry_on_dut = False
392
393        output_format = kwargs.get('ex_output_format', '')
394
395        if not output_format:
396            output_format = 'histograms'
397
398        if self._telemetry_on_dut:
399            telemetry_script = os.path.join(DUT_CHROME_ROOT,
400                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)
401            self._ensure_deps(self._host, benchmark)
402        else:
403            telemetry_script = os.path.join(self._telemetry_path,
404                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)
405
406        result = self._run_telemetry(telemetry_script, benchmark,
407                                     output_format, *args, **kwargs)
408
409        if result.status == WARNING_STATUS:
410            raise error.TestWarn('Telemetry Benchmark: %s'
411                                 ' exited with Warnings.\nOutput:\n%s\n' %
412                                 (benchmark, result.output))
413        elif result.status == FAILED_STATUS:
414            raise error.TestFail('Telemetry Benchmark: %s'
415                                 ' failed to run.\nOutput:\n%s\n' %
416                                 (benchmark, result.output))
417        elif '[  PASSED  ] 0 tests.' in result.output:
418            raise error.TestWarn('Telemetry Benchmark: %s exited successfully,'
419                                 ' but no test actually passed.\nOutput:\n%s\n'
420                                 % (benchmark, result.output))
421        if perf_value_writer:
422            artifacts = kwargs.get('artifacts', False)
423            self._run_scp(perf_value_writer.resultsdir, output_format,
424                          artifacts)
425        return result
426
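    # Illustrative sketch (not part of the original module): a typical call
    # from an autotest test, assuming `self.job` provides output_perf_value()
    # and resultsdir. The benchmark name is hypothetical.
    #
    #   runner.run_telemetry_benchmark('octane', perf_value_writer=self.job)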
427    def run_gpu_integration_test(self, test, *args):
428        """Runs a gpu test on a dut.
429
430        @param test: Gpu test we want to run.
431        @param args: additional list of arguments to pass to the telemetry
432                     execution script.
433
434        @returns A TelemetryResult instance with the results of this telemetry
435                 execution.
436        """
437        script = os.path.join(DUT_CHROME_ROOT, TELEMETRY_RUN_GPU_TESTS_SCRIPT)
438        cmd = [
439                self._host.ssh_command(alive_interval=900,
440                                       connection_attempts=4), sys.executable,
441                script
442        ]
443        cmd.extend(args)
444        cmd.append(test)
445        cmd = ' '.join(cmd)
446        stdout, stderr, exit_code = self._run_cmd(cmd)
447
448        if exit_code:
449            raise error.TestFail('Gpu Integration Test: %s'
450                                 ' failed to run.' % test)
451
452        return TelemetryResult(
453                exit_code=exit_code, stdout=stdout, stderr=stderr)
454
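    # Illustrative sketch (not part of the original module): gpu integration
    # tests are always driven on the DUT over ssh; extra args are passed
    # straight through to the script. The test name below is hypothetical.
    #
    #   runner.run_gpu_integration_test('pixel')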
455    def _ensure_deps(self, dut, test_name):
456        """
457        Ensure the dependencies are locally available on DUT.
458
459        @param dut: The autotest host object representing DUT.
460        @param test_name: Name of the telemetry test.
461        """
462        # Get DEPs using host's telemetry.
463        # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
464        # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
465        fetch_path = os.path.join(self._telemetry_path, 'tools', 'perf',
466                                  'fetch_benchmark_deps.py')
467        # Use a temporary file for |deps_path| to avoid race conditions. The
468        # created temporary file is assigned to |self._benchmark_deps| to make
469        # it valid until |self| is destroyed.
470        self._benchmark_deps = tempfile.NamedTemporaryFile(
471                prefix='fetch_benchmark_deps_result.', suffix='.json')
472        deps_path = self._benchmark_deps.name
473        command_fetch = (f'{sys.executable} {fetch_path} '
474                         f'--output-deps={deps_path} {test_name}')
475        command_get = f'cat {deps_path}'
476
477        logging.info('Getting DEPs: %s', command_fetch)
478        _, _, exit_code = self._run_cmd(command_fetch)
479        if exit_code != 0:
480            raise error.TestFail('Error occurred while fetching DEPs.')
481        stdout, _, exit_code = self._run_cmd(command_get)
482        if exit_code != 0:
483            raise error.TestFail('Error occurred while getting DEPs.')
484
485        # Download DEPs to DUT.
486        # send_file() relies on rsync over ssh. Couldn't be better.
487        deps = json.loads(stdout)
488        for dep in deps[test_name]:
489            src = os.path.join(self._telemetry_path, dep)
490            dst = os.path.join(DUT_CHROME_ROOT, dep)
491            if not os.path.isfile(src):
492                raise error.TestFail('Error occurred while saving DEPs.')
493            logging.info('Copying: %s -> %s', src, dst)
494            dut.send_file(src, dst)
495
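    # Illustrative sketch (not part of the original module): for a deps map
    # like {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']} the file
    # is copied from <telemetry_path>/tools/perf/page_sets/data/octane_002.wprgo
    # on the drone to
    # /usr/local/telemetry/src/tools/perf/page_sets/data/octane_002.wprgo on
    # the DUT.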
496    @staticmethod
497    def convert_chart_json(histogram_set):
498        """
499        Convert from histogram set to chart json format.
500
501        @param histogram_set: result in histogram set format.
502
503        @returns result in chart json format.
504        """
505        value_map = {}
506
507        # Gets generic set values.
508        for obj in histogram_set:
509            if 'type' in obj and obj['type'] == 'GenericSet':
510                value_map[obj['guid']] = obj['values']
511
512        charts = {}
513        benchmark_name = ''
514        benchmark_desc = ''
515
516        # See the unit test for how this conversion works.
517        for obj in histogram_set:
518            if 'name' not in obj or 'sampleValues' not in obj:
519                continue
520            metric_name = obj['name']
521            diagnostics = obj['diagnostics']
522            if 'stories' in diagnostics:
523                story_name = value_map[diagnostics['stories']][0]
524            else:
525                story_name = 'default'
526            local_benchmark_name = value_map[diagnostics['benchmarks']][0]
527            if benchmark_name == '':
528                benchmark_name = local_benchmark_name
529                if 'benchmarkDescriptions' in diagnostics:
530                    benchmark_desc = value_map[
531                            diagnostics['benchmarkDescriptions']][0]
532            if benchmark_name != local_benchmark_name:
533                logging.warning(
534                        'There is more than one benchmark name in the '
535                        'result. old: %s, new: %s', benchmark_name,
536                        local_benchmark_name)
537                continue
538
539            unit = obj['unit']
540            smaller_postfixes = ('_smallerIsBetter', '-')
541            bigger_postfixes = ('_biggerIsBetter', '+')
542            all_postfixes = smaller_postfixes + bigger_postfixes
543
544            improvement = 'up'
545            for postfix in smaller_postfixes:
546                if unit.endswith(postfix):
547                    improvement = 'down'
548            for postfix in all_postfixes:
549                if unit.endswith(postfix):
550                    unit = unit[:-len(postfix)]
551                    break
552
553            if unit == 'unitless':
554                unit = 'score'
555
556            values = [
557                    x for x in obj['sampleValues']
558                    if isinstance(x, numbers.Number)
559            ]
560            if metric_name not in charts:
561                charts[metric_name] = {}
562            charts[metric_name][story_name] = {
563                    'improvement_direction': improvement,
564                    'name': metric_name,
565                    'std': numpy.std(values),
566                    'type': 'list_of_scalar_values',
567                    'units': unit,
568                    'values': values
569            }
570
571        # Adds summaries.
572        for metric_name in charts:
573            values = []
574            metric_content = charts[metric_name]
575            for story_name in metric_content:
576                story_content = metric_content[story_name]
577                values += story_content['values']
578                metric_type = story_content['type']
579                units = story_content['units']
580                improvement = story_content['improvement_direction']
581            values.sort()
582            std = numpy.std(values)
583            metric_content['summary'] = {
584                    'improvement_direction': improvement,
585                    'name': metric_name,
586                    'std': std,
587                    'type': metric_type,
588                    'units': units,
589                    'values': values
590            }
591
592        benchmark_metadata = {
593                'description': benchmark_desc,
594                'name': benchmark_name,
595                'type': 'telemetry_benchmark'
596        }
597        return {
598                'benchmark_description': benchmark_desc,
599                'benchmark_metadata': benchmark_metadata,
600                'benchmark_name': benchmark_name,
601                'charts': charts,
602                'format_version': 1.0
603        }
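    # Illustrative sketch (not part of the original module): shape of the
    # conversion done by convert_chart_json. A histogram-set entry such as
    #
    #   {'name': 'Total', 'unit': 'ms_smallerIsBetter', 'sampleValues': [1, 2],
    #    'diagnostics': {'benchmarks': <guid>, 'stories': <guid>}}
    #
    # becomes, roughly, the chart entry
    #
    #   charts['Total'][<story>] = {'improvement_direction': 'down',
    #                               'units': 'ms', 'values': [1, 2],
    #                               'type': 'list_of_scalar_values', ...}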
604
605
606class LocalTelemetryRunner(TelemetryRunner):
607    """Specialized TelemetryRunner to handle local telemetry test runs."""
608
609    def __init__(self, *args, **kwargs):
610        """Initialize LocalTelemetryRunner.
611
612        The telemetry test will run locally. Depending on whether
613        telemetry_on_dut is True or False, these are the possible ways the
614        test is executed:
615
616        telemetry_on_dut=False:
617        python run_benchmark --browser=cros-chrome --remote=[dut] [test]
618
619        telemetry_on_dut=True:
620        ssh [dut] python run_benchmark --browser=system [test]
621
622        @param args: The list of arguments to be passed. See Base class for a
623                     complete list of accepted arguments.
624        @param kwargs: Any keyword arguments to be passed. See Base class for a
625                       complete list of accepted keyword arguments.
626        """
627        super(LocalTelemetryRunner, self).__init__(*args, **kwargs)
628
629    def _setup_telemetry(self):
630        """Setup Telemetry to use local path to its sources.
631
632        First look for chrome source root, either externally mounted, or inside
633        the chroot.  Prefer chrome-src-internal source tree to chrome-src.
634        """
635        TELEMETRY_DIR = 'src'
636        CHROME_LOCAL_SRC = '/var/cache/chromeos-cache/distfiles/target/'
637        CHROME_EXTERNAL_SRC = os.path.expanduser('~/chrome_root/')
638
639        logging.debug('Setting up telemetry for local testing')
640
641        sources_list = ('chrome-src-internal', 'chrome-src')
642        dir_list = [CHROME_EXTERNAL_SRC]
643        dir_list.extend(
644                [os.path.join(CHROME_LOCAL_SRC, x) for x in sources_list])
645        if 'CHROME_ROOT' in os.environ:
646            dir_list.insert(0, os.environ['CHROME_ROOT'])
647
648        telemetry_src = ''
649        for directory in dir_list:
650            if os.path.exists(directory):
651                telemetry_src = os.path.join(directory, TELEMETRY_DIR)
652                break
653        else:
654            raise error.TestError('Telemetry source directory not found.')
655
656        self._telemetry_path = telemetry_src
657
658
659class DroneTelemetryRunner(TelemetryRunner):
660    """Handle telemetry test setup on the drone.
661
662    Users of this class are strongly advised to use this class as a context
663    manager. Since the setup of the telemetry environment happens on the drone, it
664    is imperative that this setup be cleaned up once the test is done. Using
665    this class as a context manager will transfer the burden of clean up from
666    the user to Python.
667    """
668
669    def __init__(self, *args, **kwargs):
670        """Initialize DroneTelemetryRunner.
671
672        The telemetry test will run on the drone. Depending on whether
673        telemetry_on_dut is True or False, these are the possible ways the
674        test is executed:
675
676        telemetry_on_dut=False:
677        python run_benchmark --browser=cros-chrome --remote=[dut] [test]
678
679        telemetry_on_dut=True:
680        ssh [dut] python run_benchmark --browser=system [test]
681
682        @param args: The list of arguments to be passed. See Base class for a
683                     complete list of accepted arguments.
684        @param kwargs: Any keyword arguments to be passed. See Base class for a
685                       complete list of accepted keyword arguments.
686        """
687        self._telemetry_setup = None
688        super(DroneTelemetryRunner, self).__init__(*args, **kwargs)
689
690    def __enter__(self):
691        """Called while entering context manager; does nothing."""
692        return self
693
694    def __exit__(self, exc_type, exc_value, traceback):
695        """Called while exiting context manager; cleans up temp files."""
696        logging.info('Cleaning up the telemetry environment on the drone.')
697        self._telemetry_setup.Cleanup()
698
699    def _setup_telemetry(self):
700        """Setup Telemetry on the drone."""
701        logging.debug('Setting up telemetry on the drone')
702        info = self._host.host_info_store.get()
703        if not info.build:
704            logging.error('Unable to locate build label for host: %s.',
705                          self._host.host_port)
706            raise error.AutotestError('Failed to grab build for host %s.' %
707                                      self._host.host_port)
708
709        logging.debug('Setting up telemetry for build: %s', info.build)
710        try:
711            self._telemetry_setup = telemetry_setup.TelemetrySetup(
712                    hostname=self._host.hostname, build=info.build)
713            self._telemetry_path = self._telemetry_setup.Setup()
714        except telemetry_setup.TelemetrySetupError as e:
715            raise error.AutotestError('Telemetry Environment could not be '
716                                      'set up: %s.' % e)
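# Illustrative sketch (not part of the original module): DroneTelemetryRunner
# used as a context manager, so the telemetry environment created by
# telemetry_setup.TelemetrySetup is cleaned up even if the benchmark raises.
# `host` and `job` are assumed to come from the calling autotest test; the
# benchmark name is hypothetical.
#
#   with TelemetryRunnerFactory().get_runner(host, local=False) as runner:
#       runner.run_telemetry_benchmark('speedometer2', perf_value_writer=job)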
717