#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Test for generate_report.py."""


import copy
import json
import unittest
import unittest.mock as mock

import generate_report
import results_report
import test_flag


# pylint: disable=deprecated-module
try:
    from StringIO import StringIO  # for Python 2
except ImportError:
    from io import StringIO  # for Python 3


class _ContextualStringIO(StringIO):
    """StringIO that can be used in `with` statements."""

    def __init__(self, *args):
        StringIO.__init__(self, *args)

    def __enter__(self):
        return self

    def __exit__(self, _type, _value, _traceback):
        pass


class GenerateReportTests(unittest.TestCase):
    """Tests for generate_report.py."""

    def testCountBenchmarks(self):
        """CountBenchmarks reports the longest run-list per benchmark name."""
        runs = {
            "foo": [[{}, {}, {}], [{}, {}, {}, {}]],
            "bar": [],
            "baz": [[], [{}], [{}, {}, {}]],
        }
        results = generate_report.CountBenchmarks(runs)
        expected_results = [("foo", 4), ("bar", 0), ("baz", 3)]
        self.assertCountEqual(expected_results, results)

    def testCutResultsInPlace(self):
        """CutResultsInPlace mutates its argument, preserving structure.

        The cut must keep every benchmark, every run list, and every
        keyval dict (possibly with fewer keys); surviving key/value pairs
        must come unchanged from the originals.
        """
        bench_data = {
            "foo": [[{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 2.5, "c": 1}]],
            "bar": [[{"d": 11, "e": 12, "f": 13}]],
            "baz": [[{"g": 12, "h": 13}]],
            "qux": [[{"i": 11}]],
        }
        original_bench_data = copy.deepcopy(bench_data)

        max_keys = 2
        results = generate_report.CutResultsInPlace(
            bench_data, max_keys=max_keys, complain_on_update=False
        )
        # Cuts should be in-place.
        self.assertIs(results, bench_data)
        self.assertCountEqual(
            list(original_bench_data.keys()), list(bench_data.keys())
        )
        for bench_name, original_runs in original_bench_data.items():
            bench_runs = bench_data[bench_name]
            self.assertEqual(len(original_runs), len(bench_runs))
            # Order of these sub-lists shouldn't have changed.
            for original_list, new_list in zip(original_runs, bench_runs):
                self.assertEqual(len(original_list), len(new_list))
                for original_keyvals, sub_keyvals in zip(
                    original_list, new_list
                ):
                    # sub_keyvals must be a subset of original_keyvals.
                    # assertDictContainsSubset was removed in Python 3.12;
                    # a dict items-view subset comparison is the supported
                    # equivalent and needs no hashable values.
                    self.assertLessEqual(
                        sub_keyvals.items(), original_keyvals.items()
                    )

    def testCutResultsInPlaceLeavesRetval(self):
        """The cut keeps a literal "retval" key even with max_keys=0."""
        bench_data = {
            "foo": [[{"retval": 0, "a": 1}]],
            "bar": [[{"retval": 1}]],
            "baz": [[{"RETVAL": 1}]],
        }
        results = generate_report.CutResultsInPlace(
            bench_data, max_keys=0, complain_on_update=False
        )
        # Just reach into results assuming we know it otherwise outputs things
        # in the expected way. If it doesn't, testCutResultsInPlace should give
        # an indication as to what, exactly, is broken.
        self.assertEqual(list(results["foo"][0][0].items()), [("retval", 0)])
        self.assertEqual(list(results["bar"][0][0].items()), [("retval", 1)])
        # "RETVAL" (wrong case) is not special-cased, so it is cut.
        self.assertEqual(list(results["baz"][0][0].items()), [])

    def _RunMainWithInput(self, args, input_obj):
        """Runs Main() with `input_obj` (JSON-serialized) fed in as stdin."""
        assert "-i" not in args
        args += ["-i", "-"]
        input_buf = _ContextualStringIO(json.dumps(input_obj))
        with mock.patch(
            "generate_report.PickInputFile", return_value=input_buf
        ) as patched_pick:
            result = generate_report.Main(args)
        patched_pick.assert_called_once_with("-")
        return result

    @mock.patch("generate_report.RunActions")
    def testMain(self, mock_run_actions):
        """Main() queues one report action per requested format.

        Email is left out because it's a bit more difficult to test, and it'll
        be mildly obvious if it's failing.
        """
        args = ["--json", "--html", "--text"]
        return_code = self._RunMainWithInput(
            args, {"platforms": [], "data": {}}
        )
        self.assertEqual(0, return_code)
        self.assertEqual(mock_run_actions.call_count, 1)
        # RunActions receives (ctor, name) pairs; check the ctors it was given.
        ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
        self.assertEqual(
            ctors,
            [
                results_report.JSONResultsReport,
                results_report.TextResultsReport,
                results_report.HTMLResultsReport,
            ],
        )

    @mock.patch("generate_report.RunActions")
    def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
        """With no format flags, Main() defaults to an HTML report."""
        args = []
        return_code = self._RunMainWithInput(
            args, {"platforms": [], "data": {}}
        )
        self.assertEqual(0, return_code)
        self.assertEqual(mock_run_actions.call_count, 1)
        ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
        self.assertEqual(ctors, [results_report.HTMLResultsReport])

    # We only mock print_exc so we don't have exception info printed to stdout.
    @mock.patch("generate_report.WriteFile", side_effect=ValueError("Oh noo"))
    @mock.patch("traceback.print_exc")
    def testRunActionsRunsAllActionsRegardlessOfExceptions(
        self, mock_print_exc, mock_write_file
    ):
        """A failing action must not stop the remaining actions from running."""
        actions = [
            (None, "json"),
            (None, "html"),
            (None, "text"),
            (None, "email"),
        ]
        output_prefix = "-"
        ok = generate_report.RunActions(
            actions, {}, output_prefix, overwrite=False, verbose=False
        )
        # Every action raised, so the overall result is failure...
        self.assertFalse(ok)
        # ...but each action was still attempted and its traceback printed.
        self.assertEqual(mock_write_file.call_count, len(actions))
        self.assertEqual(mock_print_exc.call_count, len(actions))

    @mock.patch("generate_report.WriteFile")
    def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
        """RunActions reports success when no action raises."""
        actions = [(None, "json"), (None, "html"), (None, "text")]
        output_prefix = "-"
        ok = generate_report.RunActions(
            actions, {}, output_prefix, overwrite=False, verbose=False
        )
        self.assertEqual(mock_write_file.call_count, len(actions))
        self.assertTrue(ok)

if __name__ == "__main__":
    # Flip the shared flag on before the suite runs so the code under test
    # knows it is being driven by unit tests (see the test_flag module).
    test_flag.SetTestMode(True)
    unittest.main()