# Lint as: python3
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import json
import logging
import numbers
import os
import tempfile
import six
import sys

import numpy

import common
from autotest_lib.client.common_lib import error, utils
from autotest_lib.server.cros import telemetry_setup

TELEMETRY_RUN_BENCHMARKS_SCRIPT = 'tools/perf/run_benchmark'
TELEMETRY_RUN_TESTS_SCRIPT = 'tools/telemetry/run_tests'
TELEMETRY_RUN_GPU_TESTS_SCRIPT = 'content/test/gpu/run_gpu_integration_test.py'
TELEMETRY_TIMEOUT_MINS = 150

DUT_CHROME_ROOT = '/usr/local/telemetry/src'

CHART_JSON_RESULT = 'results-chart.json'
HISTOGRAM_SET_RESULT = 'histograms.json'
PROFILE_ARTIFACTS = 'artifacts'

# Result Statuses
SUCCESS_STATUS = 'SUCCESS'
WARNING_STATUS = 'WARNING'
FAILED_STATUS = 'FAILED'

# A list of telemetry tests that cannot run on the DUT.
ON_DUT_BLOCKLIST = [
        'loading.desktop',  # crbug/882299
        'rendering.desktop',  # crbug/882291
]


class TelemetryResult(object):
    """Class to represent the results of a telemetry run.

    This class represents the results of a telemetry run, whether it ran
    successfully, failed or had warnings.
    """

    def __init__(self, exit_code=0, stdout='', stderr=''):
        """Initializes this TelemetryResult instance.

        @param exit_code: Exit code of the telemetry run.
        @param stdout: Stdout of the telemetry run.
        @param stderr: Stderr of the telemetry run.
        """
        if exit_code == 0:
            self.status = SUCCESS_STATUS
        else:
            self.status = FAILED_STATUS

        self._stdout = stdout
        self._stderr = stderr
        self.output = '\n'.join([stdout, stderr])


class TelemetryRunnerFactory(object):
    """A factory class to determine the TelemetryRunner subclass to be used.

    The TelemetryRunner class, today, has various ways to execute the telemetry
    test. The test can be executed locally (using a tool like test_that) or can
    be executed in the lab environment - for this use case, either the drone OR
    the devserver can be used.

    A factory class offloads this determination overhead from the clients.
    Users of the TelemetryRunner class are highly encouraged to go through this
    factory class when determining the correct TelemetryRunner subclass.
    """

    def get_runner(self,
                   host,
                   local=False,
                   telemetry_on_dut=True,
                   is_lacros=False):
        """Method to determine which TelemetryRunner subclass to use."""
        if local:
            return LocalTelemetryRunner(host, telemetry_on_dut)
        else:
            return DroneTelemetryRunner(host, telemetry_on_dut, is_lacros)
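
    # Illustrative usage sketch (hypothetical caller code, not part of this
    # module): a test obtains a runner via the factory and, especially for
    # drone runs, uses it as a context manager so the telemetry environment
    # set up on the drone is cleaned up afterwards. The host and job objects
    # below are placeholders for whatever the caller already has.
    #
    #     runner = TelemetryRunnerFactory().get_runner(host, local=False)
    #     with runner as telemetry:
    #         result = telemetry.run_telemetry_benchmark(
    #                 'octane', perf_value_writer=job)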


class TelemetryRunner(six.with_metaclass(abc.ABCMeta, object)):
    """Class responsible for telemetry for a given build.

    This class will extract and install the telemetry environment and is
    responsible for executing the telemetry benchmarks and returning their
    output to the caller.
    """

    def __init__(self, host, telemetry_on_dut=True, is_lacros=False):
        """Initializes this telemetry runner instance.

        If telemetry is not installed for this build, it will be.

        @param host: Host where the test will be run.
        @param telemetry_on_dut: If set, telemetry itself (the test harness)
                                 will run on the DUT. This decides between
                                 browser=system and browser=cros-chrome.
        @param is_lacros: If True, run telemetry against lacros chrome by
                          passing browser=lacros-chrome. Only valid when
                          telemetry does not run on the DUT.
        """
        self._host = host
        self._telemetry_path = None
        self._perf_value_writer = None
        self._setup_telemetry()
        self._telemetry_on_dut = telemetry_on_dut
        self._benchmark_deps = None
        self._is_lacros = is_lacros
        logging.debug('Telemetry Path: %s', self._telemetry_path)

    def __enter__(self):
        """Called while entering context manager; does nothing."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Called while exiting context manager."""

    @abc.abstractmethod
    def _setup_telemetry(self):
        """Set up telemetry environment."""

    def _get_telemetry_cmd(self, script, test_or_benchmark, output_format,
                           *args, **kwargs):
        """Build command to execute telemetry based on script and benchmark.

        @param script: Telemetry script we want to run. For example:
                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
        @param test_or_benchmark: Name of the test or benchmark we want to run,
                                  with the page_set (if required) as part of
                                  the string.
        @param output_format: Format of the json result file: histogram or
                              chart-json.
        @param args: additional list of arguments to pass to the script.
        @param kwargs: additional list of keyword arguments to pass to the
                       script.

        @returns Full telemetry command to execute the script.
        """
        telemetry_cmd = []
        no_verbose = kwargs.get('no_verbose', False)

        output_dir = (DUT_CHROME_ROOT
                      if self._telemetry_on_dut else self._telemetry_path)
        # Create a temp directory to hold a single test run.
        if self._perf_value_writer:
            output_dir = os.path.join(
                    output_dir, self._perf_value_writer.tmpdir.strip('/'))

        if self._telemetry_on_dut:
            telemetry_cmd.extend([
                    self._host.ssh_command(alive_interval=900,
                                           connection_attempts=4),
                    sys.executable,
                    script,
                    '--output-format=%s' % output_format,
                    '--output-dir=%s' % output_dir,
                    '--browser=system',
            ])
        else:
            browser = 'lacros-chrome' if self._is_lacros else 'cros-chrome'
            telemetry_cmd.extend([
                    sys.executable,
                    script,
                    '--browser=%s' % browser,
                    '--output-format=%s' % output_format,
                    '--output-dir=%s' % output_dir,
                    '--remote=%s' % self._host.hostname,
            ])
            if self._host.host_port != self._host.hostname and self._host.host_port:
                # If the user specified a different ssh port for the DUT, pass
                # it to telemetry with a separate argument.
                #
                # e.g. when running experiments over ssh port forwarding, the
                # remote is specified as 127.0.0.1:2222; host_port is then
                # 127.0.0.1:2222, hostname is 127.0.0.1 and port is 2222.
                telemetry_cmd.append('--remote-ssh-port=%s' % self._host.port)

        if not no_verbose:
            telemetry_cmd.append('--verbose')
        telemetry_cmd.extend(args)
        telemetry_cmd.append(test_or_benchmark)

        return ' '.join(telemetry_cmd)
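
    # Illustrative command shapes (a sketch of what the code above builds, not
    # output captured from a real run). With telemetry_on_dut=False and the
    # default ssh port, the assembled command is roughly:
    #     <python> <telemetry_path>/tools/perf/run_benchmark \
    #         --browser=cros-chrome --output-format=histograms \
    #         --output-dir=<telemetry_path> --remote=<dut hostname> \
    #         --verbose <benchmark>
    # With telemetry_on_dut=True, an ssh invocation is prefixed instead and
    # --browser=system is used with DUT_CHROME_ROOT as the output directory.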

    def _scp_telemetry_results_cmd(self, perf_results_dir, output_format,
                                   artifacts):
        """Build command to copy the telemetry results from the work directory.

        @param perf_results_dir: directory path where test output is to be
                                 collected.
        @param output_format: Format of the json result file: histogram or
                              chart-json.
        @param artifacts: Whether we want to copy the artifacts directory.

        @returns SCP command to copy the results json to the specified
                 directory.
        """
        if not perf_results_dir:
            return ''

        output_filename = CHART_JSON_RESULT
        if output_format == 'histograms':
            output_filename = HISTOGRAM_SET_RESULT
        scp_cmd = []
        if self._telemetry_on_dut:
            scp_cmd.extend(['scp', '-r'])
            scp_cmd.append(
                    self._host.make_ssh_options(
                            alive_interval=900, connection_attempts=4))
            if not self._host.is_default_port:
                scp_cmd.append('-P %d' % self._host.port)
            src = 'root@%s:%s' % (self._host.hostname, DUT_CHROME_ROOT)
        else:
            # Use rsync --remove-source-files to move rather than copy from
            # the work dir. Each run generates artifacts that are never
            # removed otherwise, so the result size would keep growing. We
            # don't do this for results on the DUT because 1) rsync doesn't
            # work there and 2) the DUT is reflashed frequently, so result
            # size is not a concern.
            scp_cmd.extend(['rsync', '-avz', '--remove-source-files'])
            src = self._telemetry_path

        if self._perf_value_writer:
            src = os.path.join(src, self._perf_value_writer.tmpdir.strip('/'))

        scp_cmd.append(os.path.join(src, output_filename))

        # Copy artifacts back to the result directory if needed.
        if artifacts:
            scp_cmd.append(os.path.join(src, PROFILE_ARTIFACTS))

        scp_cmd.append(perf_results_dir)
        return ' '.join(scp_cmd)
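
    # Illustrative shapes of the command built above (a sketch, not captured
    # output). For results left on the DUT:
    #     scp -r <ssh options> root@<dut>:/usr/local/telemetry/src/histograms.json <perf_results_dir>
    # For results in the local/drone work directory:
    #     rsync -avz --remove-source-files <telemetry_path>/histograms.json <perf_results_dir>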

    def _run_cmd(self, cmd):
        """Execute a command in an external shell and capture the output.

        @param cmd: String of a valid shell command.

        @returns The standard out, standard error and the integer exit code of
                 the executed command.
        """
        logging.debug('Running: %s', cmd)

        output = six.StringIO()
        error_output = six.StringIO()
        exit_code = 0
        try:
            result = utils.run(
                    cmd,
                    stdout_tee=output,
                    stderr_tee=error_output,
                    timeout=TELEMETRY_TIMEOUT_MINS * 60)
            exit_code = result.exit_status
        except error.CmdError as e:
            logging.debug('Error occurred executing.')
            exit_code = e.result_obj.exit_status

        stdout = output.getvalue()
        stderr = error_output.getvalue()
        logging.debug('Completed with exit code: %d.\nstdout:%s\n'
                      'stderr:%s', exit_code, stdout, stderr)
        return stdout, stderr, exit_code

    def _run_telemetry(self, script, test_or_benchmark, output_format, *args,
                       **kwargs):
        """Runs telemetry on a dut.

        @param script: Telemetry script we want to run. For example:
                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
        @param test_or_benchmark: Name of the test or benchmark we want to run,
                                  with the page_set (if required) as part of
                                  the string.
        @param args: additional list of arguments to pass to the script.
        @param kwargs: additional list of keyword arguments to pass to the
                       script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        # TODO (sbasi crbug.com/239933) add support for incognito mode.

        telemetry_cmd = self._get_telemetry_cmd(script, test_or_benchmark,
                                                output_format, *args, **kwargs)
        logging.info('Running Telemetry: %s', telemetry_cmd)

        stdout, stderr, exit_code = self._run_cmd(telemetry_cmd)

        return TelemetryResult(
                exit_code=exit_code, stdout=stdout, stderr=stderr)

    def _run_scp(self, perf_results_dir, output_format, artifacts=False):
        """Copies telemetry results from the work directory or the DUT.

        @param perf_results_dir: The local directory where results are to be
                                 collected.
        @param output_format: Format of the json result file.
        @param artifacts: Whether we want to copy the artifacts directory.
        """
        scp_cmd = self._scp_telemetry_results_cmd(perf_results_dir,
                                                  output_format, artifacts)
        logging.debug('Retrieving Results: %s', scp_cmd)
        _, _, exit_code = self._run_cmd(scp_cmd)
        if exit_code != 0:
            raise error.TestFail('Unable to retrieve results.')

        if output_format == 'histograms':
            # Converts to chart json format.
            input_filename = os.path.join(perf_results_dir,
                                          HISTOGRAM_SET_RESULT)
            output_filename = os.path.join(perf_results_dir, CHART_JSON_RESULT)
            with open(input_filename) as fin:
                histograms = json.load(fin)
            chartjson = TelemetryRunner.convert_chart_json(histograms)
            with open(output_filename, 'w') as fout:
                fout.write(json.dumps(chartjson, indent=2))

    def _run_test(self, script, test, *args):
        """Runs a telemetry test on a dut.

        @param script: Which telemetry test script we want to run. Can be
                       telemetry's base test script or the ChromeOS specific
                       test script.
        @param test: Telemetry test we want to run.
        @param args: additional list of arguments to pass to the script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry test: %s', test)
        telemetry_script = os.path.join(self._telemetry_path, script)
        result = self._run_telemetry(telemetry_script, test, 'chartjson',
                                     *args)
        if result.status == FAILED_STATUS:
            raise error.TestFail('Telemetry test %s failed.' % test)
        return result

    def run_telemetry_test(self, test, *args):
        """Runs a telemetry test on a dut.

        @param test: Telemetry test we want to run.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        return self._run_test(TELEMETRY_RUN_TESTS_SCRIPT, test, *args)

    def run_telemetry_benchmark(self,
                                benchmark,
                                perf_value_writer=None,
                                *args,
                                **kwargs):
        """Runs a telemetry benchmark on a dut.

        @param benchmark: Benchmark we want to run.
        @param perf_value_writer: Should be an instance with the function
                                  output_perf_value(); if None, no perf value
                                  will be written. Typically this will be the
                                  job object from an autotest test.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.
        @param kwargs: additional list of keyword arguments to pass to the
                       telemetry execution script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry benchmark: %s', benchmark)

        self._perf_value_writer = perf_value_writer

        if benchmark in ON_DUT_BLOCKLIST:
            self._telemetry_on_dut = False

        output_format = kwargs.get('ex_output_format', '')

        if not output_format:
            output_format = 'histograms'

        if self._telemetry_on_dut:
            telemetry_script = os.path.join(DUT_CHROME_ROOT,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)
            self._ensure_deps(self._host, benchmark)
        else:
            telemetry_script = os.path.join(self._telemetry_path,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)

        result = self._run_telemetry(telemetry_script, benchmark,
                                     output_format, *args, **kwargs)

        if result.status == WARNING_STATUS:
            raise error.TestWarn('Telemetry Benchmark: %s'
                                 ' exited with Warnings.\nOutput:\n%s\n' %
                                 (benchmark, result.output))
        elif result.status == FAILED_STATUS:
            raise error.TestFail('Telemetry Benchmark: %s'
                                 ' failed to run.\nOutput:\n%s\n' %
                                 (benchmark, result.output))
        elif '[  PASSED  ] 0 tests.' in result.output:
            raise error.TestWarn('Telemetry Benchmark: %s exited successfully,'
                                 ' but no test actually passed.\nOutput:\n%s\n'
                                 % (benchmark, result.output))
        if perf_value_writer:
            artifacts = kwargs.get('artifacts', False)
            self._run_scp(perf_value_writer.resultsdir, output_format,
                          artifacts)
        return result
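
    # Hypothetical call sketch (benchmark name and job object are
    # placeholders): passing ex_output_format and artifacts through kwargs is
    # how callers select chart-json output and request the artifacts copy.
    #
    #     runner.run_telemetry_benchmark('speedometer2',
    #                                    perf_value_writer=job,
    #                                    ex_output_format='chartjson',
    #                                    artifacts=True)
    #
    # With perf_value_writer set, the results json (and artifacts, if any)
    # end up in perf_value_writer.resultsdir via _run_scp() above.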

    def run_gpu_integration_test(self, test, *args):
        """Runs a gpu test on a dut.

        @param test: Gpu test we want to run.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        script = os.path.join(DUT_CHROME_ROOT, TELEMETRY_RUN_GPU_TESTS_SCRIPT)
        cmd = [
                self._host.ssh_command(alive_interval=900,
                                       connection_attempts=4), sys.executable,
                script
        ]
        cmd.extend(args)
        cmd.append(test)
        cmd = ' '.join(cmd)
        stdout, stderr, exit_code = self._run_cmd(cmd)

        if exit_code:
            raise error.TestFail('Gpu Integration Test: %s'
                                 ' failed to run.' % test)

        return TelemetryResult(
                exit_code=exit_code, stdout=stdout, stderr=stderr)

    def _ensure_deps(self, dut, test_name):
        """
        Ensure the dependencies are locally available on DUT.

        @param dut: The autotest host object representing DUT.
        @param test_name: Name of the telemetry test.
        """
        # Get DEPs using host's telemetry.
        # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
        # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
        fetch_path = os.path.join(self._telemetry_path, 'tools', 'perf',
                                  'fetch_benchmark_deps.py')
        # Use a temporary file for |deps_path| to avoid race conditions. The
        # created temporary file is assigned to |self._benchmark_deps| to make
        # it valid until |self| is destroyed.
        self._benchmark_deps = tempfile.NamedTemporaryFile(
                prefix='fetch_benchmark_deps_result.', suffix='.json')
        deps_path = self._benchmark_deps.name
        command_fetch = (f'{sys.executable} {fetch_path} '
                         f'--output-deps={deps_path} {test_name}')
        command_get = f'cat {deps_path}'

        logging.info('Getting DEPs: %s', command_fetch)
        _, _, exit_code = self._run_cmd(command_fetch)
        if exit_code != 0:
            raise error.TestFail('Error occurred while fetching DEPs.')
        stdout, _, exit_code = self._run_cmd(command_get)
        if exit_code != 0:
            raise error.TestFail('Error occurred while getting DEPs.')

        # Download DEPs to DUT.
        # send_file() relies on rsync over ssh. Couldn't be better.
        deps = json.loads(stdout)
        for dep in deps[test_name]:
            src = os.path.join(self._telemetry_path, dep)
            dst = os.path.join(DUT_CHROME_ROOT, dep)
            if not os.path.isfile(src):
                raise error.TestFail('Error occurred while saving DEPs.')
            logging.info('Copying: %s -> %s', src, dst)
            dut.send_file(src, dst)

    @staticmethod
    def convert_chart_json(histogram_set):
        """
        Convert from histogram set to chart json format.

        @param histogram_set: result in histogram set format.

        @returns result in chart json format.
        """
        value_map = {}

        # Gets generic set values.
        for obj in histogram_set:
            if 'type' in obj and obj['type'] == 'GenericSet':
                value_map[obj['guid']] = obj['values']

        charts = {}
        benchmark_name = ''
        benchmark_desc = ''

        # Checks the unit test for how this conversion works.
        for obj in histogram_set:
            if 'name' not in obj or 'sampleValues' not in obj:
                continue
            metric_name = obj['name']
            diagnostics = obj['diagnostics']
            if 'stories' in diagnostics:
                story_name = value_map[diagnostics['stories']][0]
            else:
                story_name = 'default'
            local_benchmark_name = value_map[diagnostics['benchmarks']][0]
            if benchmark_name == '':
                benchmark_name = local_benchmark_name
                if 'benchmarkDescriptions' in diagnostics:
                    benchmark_desc = value_map[
                            diagnostics['benchmarkDescriptions']][0]
            if benchmark_name != local_benchmark_name:
                logging.warning(
                        'There is more than one benchmark name in the '
                        'result. old: %s, new: %s', benchmark_name,
                        local_benchmark_name)
                continue

            unit = obj['unit']
            smaller_postfixes = ('_smallerIsBetter', '-')
            bigger_postfixes = ('_biggerIsBetter', '+')
            all_postfixes = smaller_postfixes + bigger_postfixes

            improvement = 'up'
            for postfix in smaller_postfixes:
                if unit.endswith(postfix):
                    improvement = 'down'
            for postfix in all_postfixes:
                if unit.endswith(postfix):
                    unit = unit[:-len(postfix)]
                    break

            if unit == 'unitless':
                unit = 'score'

            values = [
                    x for x in obj['sampleValues']
                    if isinstance(x, numbers.Number)
            ]
            if metric_name not in charts:
                charts[metric_name] = {}
            charts[metric_name][story_name] = {
                    'improvement_direction': improvement,
                    'name': metric_name,
                    'std': numpy.std(values),
                    'type': 'list_of_scalar_values',
                    'units': unit,
                    'values': values
            }

        # Adds summaries.
        for metric_name in charts:
            values = []
            metric_content = charts[metric_name]
            for story_name in metric_content:
                story_content = metric_content[story_name]
                values += story_content['values']
                metric_type = story_content['type']
                units = story_content['units']
                improvement = story_content['improvement_direction']
            values.sort()
            std = numpy.std(values)
            metric_content['summary'] = {
                    'improvement_direction': improvement,
                    'name': metric_name,
                    'std': std,
                    'type': metric_type,
                    'units': units,
                    'values': values
            }

        benchmark_metadata = {
                'description': benchmark_desc,
                'name': benchmark_name,
                'type': 'telemetry_benchmark'
        }
        return {
                'benchmark_description': benchmark_desc,
                'benchmark_metadata': benchmark_metadata,
                'benchmark_name': benchmark_name,
                'charts': charts,
                'format_version': 1.0
        }
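
    # Rough shape of the conversion above (an illustrative sketch, not data
    # from a real run): a histogram such as
    #     {'name': 'Total', 'unit': 'ms_smallerIsBetter',
    #      'sampleValues': [1200.0, 1250.0],
    #      'diagnostics': {'benchmarks': <guid>, 'stories': <guid>}}
    # whose guids resolve to GenericSet values like ['octane'] and
    # ['http://some.story'] becomes
    #     charts['Total']['http://some.story'] = {
    #             'improvement_direction': 'down', 'name': 'Total',
    #             'std': 25.0, 'type': 'list_of_scalar_values',
    #             'units': 'ms', 'values': [1200.0, 1250.0]}
    # plus one 'summary' entry per metric aggregating the values across all
    # stories.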


class LocalTelemetryRunner(TelemetryRunner):
    """Specialized TelemetryRunner to handle local telemetry test runs."""

    def __init__(self, *args, **kwargs):
        """Initialize LocalTelemetryRunner.

        The telemetry test will run locally. Depending on whether
        telemetry_on_dut is True or False, there are two possible command
        lines for the execution of this test:

        telemetry_on_dut=False:
        python run_benchmark --browser=cros-chrome --remote=[dut] [test]

        telemetry_on_dut=True:
        ssh [dut] python run_benchmark --browser=system [test]

        @param args: The list of arguments to be passed. See the base class for
                     a complete list of accepted arguments.
        @param kwargs: Any keyword arguments to be passed. See the base class
                       for a complete list of accepted keyword arguments.
        """
        super(LocalTelemetryRunner, self).__init__(*args, **kwargs)

    def _setup_telemetry(self):
        """Set up Telemetry to use a local path to its sources.

        First look for the chrome source root, either externally mounted or
        inside the chroot. Prefer the chrome-src-internal source tree to
        chrome-src.
        """
        TELEMETRY_DIR = 'src'
        CHROME_LOCAL_SRC = '/var/cache/chromeos-cache/distfiles/target/'
        CHROME_EXTERNAL_SRC = os.path.expanduser('~/chrome_root/')

        logging.debug('Setting up telemetry for local testing')

        sources_list = ('chrome-src-internal', 'chrome-src')
        dir_list = [CHROME_EXTERNAL_SRC]
        dir_list.extend(
                [os.path.join(CHROME_LOCAL_SRC, x) for x in sources_list])
        if 'CHROME_ROOT' in os.environ:
            dir_list.insert(0, os.environ['CHROME_ROOT'])

        telemetry_src = ''
        for dir in dir_list:
            if os.path.exists(dir):
                telemetry_src = os.path.join(dir, TELEMETRY_DIR)
                break
        else:
            raise error.TestError('Telemetry source directory not found.')

        self._telemetry_path = telemetry_src


class DroneTelemetryRunner(TelemetryRunner):
    """Handle telemetry test setup on the drone.

    Users of this class are strongly advised to use it as a context manager.
    Since the setup of the telemetry environment happens on the drone, it is
    imperative that this setup be cleaned up once the test is done. Using this
    class as a context manager transfers the burden of cleanup from the user
    to Python.
    """

    def __init__(self, *args, **kwargs):
        """Initialize DroneTelemetryRunner.

        The telemetry test will run on the drone. Depending on whether
        telemetry_on_dut is True or False, there are two possible command
        lines for the execution of this test:

        telemetry_on_dut=False:
        python run_benchmark --browser=cros-chrome --remote=[dut] [test]

        telemetry_on_dut=True:
        ssh [dut] python run_benchmark --browser=system [test]

        @param args: The list of arguments to be passed. See the base class for
                     a complete list of accepted arguments.
        @param kwargs: Any keyword arguments to be passed. See the base class
                       for a complete list of accepted keyword arguments.
        """
        self._telemetry_setup = None
        super(DroneTelemetryRunner, self).__init__(*args, **kwargs)

    def __enter__(self):
        """Called while entering context manager; does nothing."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Called while exiting context manager; cleans up temp files."""
        logging.info('Cleaning up the telemetry environment on the drone.')
        self._telemetry_setup.Cleanup()

    def _setup_telemetry(self):
        """Set up Telemetry on the drone."""
        logging.debug('Setting up telemetry on the drone')
        info = self._host.host_info_store.get()
        if not info.build:
            logging.error('Unable to locate build label for host: %s.',
                          self._host.host_port)
            raise error.AutotestError('Failed to grab build for host %s.' %
                                      self._host.host_port)

        logging.debug('Setting up telemetry for build: %s', info.build)
        try:
            self._telemetry_setup = telemetry_setup.TelemetrySetup(
                    hostname=self._host.hostname, build=info.build)
            self._telemetry_path = self._telemetry_setup.Setup()
        except telemetry_setup.TelemetrySetupError as e:
            raise error.AutotestError('Telemetry Environment could not be '
                                      'set up: %s.' % e)