# Lint as: python2, python3
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import perf
from autotest_lib.client.cros import service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_GLBench(graphics_utils.GraphicsTest):
    """Run glbench, a benchmark that times graphics intensive activities."""
    version = 1
    preserve_srcdir = True
    _services = None

    glbench_directory = '/usr/local/glbench/'
    # Good images.
    reference_images_file = os.path.join(glbench_directory,
                                         'files/glbench_reference_images.txt')
    # Images that are bad but for which the bug has not been fixed yet.
    knownbad_images_file = os.path.join(glbench_directory,
                                        'files/glbench_knownbad_images.txt')
    # Images that are bad and for which a fix has been submitted.
    fixedbad_images_file = os.path.join(glbench_directory,
                                        'files/glbench_fixedbad_images.txt')

    # These tests do not draw anything; they can only be used to check
    # performance.
    no_checksum_tests = set([
        'compositing_no_fill',
        'pixel_read',
        'texture_reuse_luminance_teximage2d',
        'texture_reuse_luminance_texsubimage2d',
        'texture_reuse_rgba_teximage2d',
        'texture_reuse_rgba_texsubimage2d',
        'context_glsimple',
        'swap_glsimple',
    ])

    unit_higher_is_better = {
        'mbytes_sec': True,
        'mpixels_sec': True,
        'mtexel_sec': True,
        'mtri_sec': True,
        'mvtx_sec': True,
        'us': False,
        '1280x768_fps': True
    }

    def initialize(self):
        super(graphics_GLBench, self).initialize()
        # If the UI is running, we must stop it and restore it later.
        self._services = service_stopper.ServiceStopper(['ui'])
        self._services.stop_services()

    def cleanup(self):
        if self._services:
            self._services.restore_services()
        super(graphics_GLBench, self).cleanup()

    def is_no_checksum_test(self, testname):
        """Check if the given test requires no screenshot checksum.

        @param testname: name of the test to check.
        """
        for prefix in self.no_checksum_tests:
            if testname.startswith(prefix):
                return True
        return False

    def load_imagenames(self, filename):
        """Load a text file with MD5 image file names.

        @param filename: name of the file to load.
        """
        imagenames = os.path.join(self.autodir, filename)
        with open(imagenames, 'r') as f:
            imagenames = f.read()
        return imagenames

    @graphics_utils.GraphicsTest.failure_report_decorator('graphics_GLBench')
    def run_once(self, options='', hasty=False):
        """Run the test.

        @param options: String of options to run the glbench executable with.
        @param hasty: Run the test more quickly by running fewer iterations,
                at a lower resolution, and without waiting for the DUT to
                cool down.
        """
        # Run the test; saving is optional but helps with debugging and
        # reference image management. If unknown images are encountered,
        # one can take them from the outdir and copy them (after
        # verification) into the reference image dir.
        exefile = os.path.join(self.glbench_directory, 'bin/glbench')
        outdir = self.outputdir
        options += ' -save -outdir=' + outdir
        # With the -hasty option we run only a subset of tests without
        # waiting for thermals to normalize. The test should complete in
        # 15-20 seconds.
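        # For illustration, the assembled command line ends up roughly as
        # follows (the outdir is whatever self.outputdir resolves to at
        # runtime; bracketed parts are optional):
        #   /usr/local/glbench/bin/glbench [options] -save -outdir=<outputdir> [-hasty]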
        if hasty:
            options += ' -hasty'

        cmd = '%s %s' % (exefile, options)
        summary = None
        pc_error_reason = None
        try:
            if hasty:
                # On BVT the test will not monitor thermals, so we will not
                # verify its correct status using PerfControl.
                summary = utils.run(cmd,
                                    stderr_is_expected=False,
                                    stdout_tee=utils.TEE_TO_LOGS,
                                    stderr_tee=utils.TEE_TO_LOGS).stdout
            else:
                utils.report_temperature(self, 'temperature_1_start')
                # Wrap the test run inside a PerfControl instance to make
                # machine behavior more consistent.
                with perf.PerfControl() as pc:
                    if not pc.verify_is_valid():
                        raise error.TestFail('Failed: %s' %
                                             pc.get_error_reason())
                    utils.report_temperature(self,
                                             'temperature_2_before_test')

                    # Run the test. If it gets the CPU too hot, pc should
                    # notice.
                    summary = utils.run(cmd,
                                        stderr_is_expected=False,
                                        stdout_tee=utils.TEE_TO_LOGS,
                                        stderr_tee=utils.TEE_TO_LOGS).stdout
                    if not pc.verify_is_valid():
                        # Defer error handling until after the perf report.
                        pc_error_reason = pc.get_error_reason()
        except error.CmdError:
            raise error.TestFail('Failed: CmdError running %s' % cmd)
        except error.CmdTimeoutError:
            raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

        # Write a copy of stdout to help debug failures.
        results_path = os.path.join(self.outputdir, 'summary.txt')
        f = open(results_path, 'w+')
        f.write('# ---------------------------------------------------\n')
        f.write('# [' + cmd + ']\n')
        f.write(summary)
        f.write('\n# -------------------------------------------------\n')
        f.write('# [graphics_GLBench.py postprocessing]\n')

        # Analyze the output. Sample:
        ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
        ## Running: ../glbench -save -outdir=img
        #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
        results = summary.splitlines()
        if not results:
            f.close()
            raise error.TestFail('Failed: No output from test. Check /tmp/'
                                 'test_that_latest/graphics_GLBench/'
                                 'summary.txt for details.')

        # The good images, the silenced (knownbad) and the zombie/recurring
        # (fixedbad) failures.
        reference_imagenames = self.load_imagenames(self.reference_images_file)
        knownbad_imagenames = self.load_imagenames(self.knownbad_images_file)
        fixedbad_imagenames = self.load_imagenames(self.fixedbad_images_file)

        # Check if we saw GLBench end as expected (without crashing).
        test_ended_normal = False
        for line in results:
            if line.strip().startswith('@TEST_END'):
                test_ended_normal = True

        # Analyze individual test results in the summary.
        # TODO(pwang): Raise TestFail if an error is detected during glbench.
        keyvals = {}
        failed_tests = {}
        for line in results:
            if not line.strip().startswith('@RESULT: '):
                continue
            keyval, remainder = line[9:].split('[')
            key, val = keyval.split('=')
            testname = key.strip()
            score, unit = val.split()
            testrating = float(score)
            imagefile = remainder.split(']')[0]

            if not hasty:
                higher = self.unit_higher_is_better.get(unit)
                if higher is None:
                    raise error.TestFail(
                        'Failed: Unknown test unit "%s" for %s' %
                        (unit, testname))
                # Prepend the unit to the test name to maintain backwards
                # compatibility with existing perf data.
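                # For example, the sample '@RESULT: swap_swap = 221.36 us'
                # line shown above yields the perf value name 'us_swap_swap'.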
                perf_value_name = '%s_%s' % (unit, testname)
                self.output_perf_value(
                    description=perf_value_name,
                    value=testrating,
                    units=unit,
                    higher_is_better=higher,
                    graph=perf_value_name)

            # Classify the result image.
            if testrating == -1.0:
                # Tests that generate GL errors.
                glerror = imagefile.split('=')[1]
                f.write('# GLError ' + glerror +
                        ' during test (perf set to -3.0)\n')
                keyvals[testname] = -3.0
                failed_tests[testname] = 'GLError'
            elif testrating == 0.0:
                # Tests for which glbench does not generate a meaningful
                # perf score.
                f.write('# No score for test\n')
                keyvals[testname] = 0.0
            elif imagefile in fixedbad_imagenames:
                # We know the image looked bad at some point in time but we
                # thought it was fixed. Throw an exception as a reminder.
                keyvals[testname] = -2.0
                f.write('# fixedbad [' + imagefile +
                        '] (setting perf as -2.0)\n')
                failed_tests[testname] = imagefile
            elif imagefile in knownbad_imagenames:
                # We have triaged the failure and have filed a tracking bug,
                # so don't throw an exception; just note that there is a
                # problem.
                keyvals[testname] = -1.0
                f.write('# knownbad [' + imagefile +
                        '] (setting perf as -1.0)\n')
                # This failure is allowlisted, so don't add it to
                # failed_tests.
            elif imagefile in reference_imagenames:
                # Known good reference images (the default).
                keyvals[testname] = testrating
            elif imagefile == 'none':
                # Tests that do not write images can't fail because of them.
                keyvals[testname] = testrating
            elif self.is_no_checksum_test(testname):
                # TODO(ihf): These tests really should not write any images.
                keyvals[testname] = testrating
            else:
                # Completely unknown images. Raise a failure.
                keyvals[testname] = -2.0
                failed_tests[testname] = imagefile
                f.write('# unknown [' + imagefile +
                        '] (setting perf as -2.0)\n')
        f.close()
        if not hasty:
            utils.report_temperature(self, 'temperature_3_after_test')
            self.write_perf_keyval(keyvals)

        # Raise an exception if images don't match their references.
        if failed_tests:
            logging.info('Some images do not match their references in %s.',
                         self.reference_images_file)
            logging.info('Please verify that the output images are correct '
                         'and, if so, copy them to the reference directory.')
            raise error.TestFail('Failed: Some images do not match their '
                                 'references. Check /tmp/'
                                 'test_that_latest/graphics_GLBench/'
                                 'summary.txt for details.')

        if not test_ended_normal:
            raise error.TestFail(
                'Failed: No end marker. Presumed crash/missing images.')
        if pc_error_reason:
            raise error.TestFail('Failed: %s' % pc_error_reason)
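
# A minimal usage sketch, assuming the conventional autotest control-file
# entry point (the control file itself is not part of this source file):
#
#   job.run_test('graphics_GLBench')              # full run with PerfControl
#   job.run_test('graphics_GLBench', hasty=True)  # quick run, skips thermals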