#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit test for experiment_factory.py"""


import io
import os
import socket
import unittest
import unittest.mock as mock

import benchmark
from cros_utils import command_executer
from cros_utils.file_utils import FileUtils
import experiment_factory
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
from results_cache import CacheConditions
import settings_factory
import test_flag


EXPERIMENT_FILE_1 = """
  board: x86-alex
  remote: chromeos-alex3
  locks_dir: /tmp

  benchmark: PageCycler {
    iterations: 3
  }

  benchmark: webrtc {
    iterations: 1
    test_args: --story-filter=datachannel
  }

  image1 {
    chromeos_image: /usr/local/google/cros_image1.bin
  }

  image2 {
    chromeos_image: /usr/local/google/cros_image2.bin
  }
  """

EXPERIMENT_FILE_2 = """
  board: x86-alex
  remote: chromeos-alex3
  locks_dir: /tmp

  cwp_dso: kallsyms

  benchmark: Octane {
    iterations: 1
    suite: telemetry_Crosperf
    run_local: False
    weight: 0.8
  }

  benchmark: Kraken {
    iterations: 1
    suite: telemetry_Crosperf
    run_local: False
    weight: 0.2
  }

  image1 {
    chromeos_image: /usr/local/google/cros_image1.bin
  }
  """

# pylint: disable=too-many-function-args


class ExperimentFactoryTest(unittest.TestCase):
    """Class for running experiment factory unittests."""

    def setUp(self):
        self.append_benchmark_call_args = []

    def testLoadExperimentFile1(self):
        experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
        exp = ExperimentFactory().GetExperiment(
            experiment_file, working_directory="", log_dir=""
        )
        self.assertEqual(exp.remote, ["chromeos-alex3"])

        self.assertEqual(len(exp.benchmarks), 2)
        self.assertEqual(exp.benchmarks[0].name, "PageCycler")
        self.assertEqual(exp.benchmarks[0].test_name, "PageCycler")
        self.assertEqual(exp.benchmarks[0].iterations, 3)
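        # The story filter from test_args is folded into the benchmark
        # name (hence "webrtc@@datachannel") while test_name stays
        # "webrtc".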
        self.assertEqual(exp.benchmarks[1].name, "webrtc@@datachannel")
        self.assertEqual(exp.benchmarks[1].test_name, "webrtc")
        self.assertEqual(exp.benchmarks[1].iterations, 1)

        self.assertEqual(len(exp.labels), 2)
        self.assertEqual(
            exp.labels[0].chromeos_image, "/usr/local/google/cros_image1.bin"
        )
        self.assertEqual(exp.labels[0].board, "x86-alex")

    def testLoadExperimentFile2CWP(self):
        experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
        exp = ExperimentFactory().GetExperiment(
            experiment_file, working_directory="", log_dir=""
        )
        self.assertEqual(exp.cwp_dso, "kallsyms")
        self.assertEqual(len(exp.benchmarks), 2)
        self.assertEqual(exp.benchmarks[0].weight, 0.8)
        self.assertEqual(exp.benchmarks[1].weight, 0.2)

    def testDuplicateBenchmark(self):
        mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
        mock_experiment_file.all_settings = []
        benchmark_settings1 = settings_factory.BenchmarkSettings("name")
        mock_experiment_file.all_settings.append(benchmark_settings1)
        benchmark_settings2 = settings_factory.BenchmarkSettings("name")
        mock_experiment_file.all_settings.append(benchmark_settings2)

        with self.assertRaises(SyntaxError):
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")

    def testCWPExceptions(self):
        mock_experiment_file = ExperimentFile(io.StringIO(""))
        mock_experiment_file.all_settings = []
        global_settings = settings_factory.GlobalSettings("test_name")
        global_settings.SetField("locks_dir", "/tmp")

        # Test 1: DSO type not supported
        global_settings.SetField("cwp_dso", "test")
        self.assertEqual(global_settings.GetField("cwp_dso"), "test")
        mock_experiment_file.global_settings = global_settings
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "The DSO specified is not supported", str(msg.exception)
        )

        # Test 2: No weight after DSO specified
        global_settings.SetField("cwp_dso", "kallsyms")
        mock_experiment_file.global_settings = global_settings
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "With DSO specified, each benchmark should have a weight",
            str(msg.exception),
        )

        # Test 3: Weight is set, but no dso specified
        global_settings.SetField("cwp_dso", "")
        mock_experiment_file.global_settings = global_settings
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        benchmark_settings.SetField("weight", "0.8")
        mock_experiment_file.all_settings = []
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "Weight can only be set when DSO specified", str(msg.exception)
        )

        # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
        global_settings.SetField("cwp_dso", "kallsyms")
        mock_experiment_file.global_settings = global_settings
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        benchmark_settings.SetField("weight", "0.8")
        mock_experiment_file.all_settings = []
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "CWP approximation weight only works with "
            "telemetry_Crosperf suite",
            str(msg.exception),
        )

        # Test 5: cwp_dso does not work for local run
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        benchmark_settings.SetField("weight", "0.8")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        benchmark_settings.SetField("run_local", "True")
        mock_experiment_file.all_settings = []
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "run_local must be set to False to use CWP approximation",
            str(msg.exception),
        )

        # Test 6: weight should be a float >= 0
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        benchmark_settings.SetField("weight", "-1.2")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        benchmark_settings.SetField("run_local", "False")
        mock_experiment_file.all_settings = []
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual("Weight should be a float >=0", str(msg.exception))

        # Test 7: more than one story/story-tag filter in test_args
        benchmark_settings = settings_factory.BenchmarkSettings("name")
        benchmark_settings.SetField(
            "test_args", "--story-filter=a --story-tag-filter=b"
        )
        benchmark_settings.SetField("weight", "1.2")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        mock_experiment_file.all_settings = []
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "Only one story or story-tag filter allowed in a single "
            "benchmark run",
            str(msg.exception),
        )

        # Test 8: Iterations of each benchmark run are not the same in CWP mode
        mock_experiment_file.all_settings = []
        benchmark_settings = settings_factory.BenchmarkSettings("name1")
        benchmark_settings.SetField("iterations", "4")
        benchmark_settings.SetField("weight", "1.2")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        benchmark_settings.SetField("run_local", "False")
        mock_experiment_file.all_settings.append(benchmark_settings)
        benchmark_settings = settings_factory.BenchmarkSettings("name2")
        benchmark_settings.SetField("iterations", "3")
        benchmark_settings.SetField("weight", "1.2")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        benchmark_settings.SetField("run_local", "False")
        mock_experiment_file.all_settings.append(benchmark_settings)
        with self.assertRaises(RuntimeError) as msg:
            ef = ExperimentFactory()
            ef.GetExperiment(mock_experiment_file, "", "")
        self.assertEqual(
            "Iterations of each benchmark run are not the same",
            str(msg.exception),
        )

    def test_append_benchmark_set(self):
        ef = ExperimentFactory()

        bench_list = []
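        # AppendBenchmarkSet is exercised with its full positional
        # argument list (test args, iterations, flags, suite, and the
        # remaining per-benchmark defaults); the module-level pylint
        # disable for too-many-function-args covers these long calls.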
        ef.AppendBenchmarkSet(
            bench_list,
            experiment_factory.telemetry_crosbolt_perf_tests,
            "",
            1,
            False,
            "",
            "telemetry_Crosperf",
            False,
            0,
            False,
            "",
            0,
        )
        self.assertEqual(
            len(bench_list),
            len(experiment_factory.telemetry_crosbolt_perf_tests),
        )
        self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

        bench_list = []
        ef.AppendBenchmarkSet(
            bench_list,
            experiment_factory.telemetry_toolchain_perf_tests,
            "",
            1,
            False,
            "",
            "telemetry_Crosperf",
            False,
            0,
            False,
            "",
            0,
        )
        self.assertEqual(
            len(bench_list),
            len(experiment_factory.telemetry_toolchain_perf_tests),
        )
        self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

        bench_list = []
        ef.AppendBenchmarkSet(
            bench_list,
            experiment_factory.telemetry_toolchain_perf_tests,
            "",
            1,
            False,
            "",
            "telemetry_Crosperf",
            False,
            0,
            False,
            "",
            0,
        )
        self.assertEqual(
            len(bench_list),
            len(experiment_factory.telemetry_toolchain_perf_tests),
        )
        self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))

    @mock.patch.object(socket, "gethostname")
    def test_get_experiment(self, mock_socket):
        test_flag.SetTestMode(False)
        self.append_benchmark_call_args = []

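        # Local fakes stand in for benchmark-set expansion, default-remote
        # lookup, and xbuddy image resolution so GetExperiment can run
        # without a chroot or real lab machines.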
        def FakeAppendBenchmarkSet(
            bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
        ):
            """Helper function for test_get_experiment"""
            arg_list = [
                bench_list,
                set_list,
                args,
                iters,
                rm_ch,
                perf_args,
                suite,
                show_all,
            ]
            self.append_benchmark_call_args.append(arg_list)

        def FakeGetDefaultRemotes(board):
            if not board:
                return []
            return [
                "fake_chromeos_machine1.cros",
                "fake_chromeos_machine2.cros",
            ]

        def FakeGetXbuddyPath(
            build, autotest_dir, debug_dir, board, chroot, log_level, perf_args
        ):
            autotest_path = autotest_dir
            if not autotest_path:
                autotest_path = "fake_autotest_path"
            debug_path = debug_dir
            if not debug_path and perf_args:
                debug_path = "fake_debug_path"
            if not build or not board or not chroot or not log_level:
                return "", autotest_path, debug_path
            return "fake_image_path", autotest_path, debug_path

        ef = ExperimentFactory()
        ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
        ef.GetDefaultRemotes = FakeGetDefaultRemotes

        label_settings = settings_factory.LabelSettings("image_label")
        benchmark_settings = settings_factory.BenchmarkSettings("bench_test")
        global_settings = settings_factory.GlobalSettings("test_name")

        label_settings.GetXbuddyPath = FakeGetXbuddyPath

        mock_experiment_file = ExperimentFile(io.StringIO(""))
        mock_experiment_file.all_settings = []

        test_flag.SetTestMode(True)
        # Basic test.
        global_settings.SetField("name", "unittest_test")
        global_settings.SetField("board", "lumpy")
        global_settings.SetField("locks_dir", "/tmp")
        global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
        benchmark_settings.SetField("test_name", "kraken")
        benchmark_settings.SetField("suite", "telemetry_Crosperf")
        benchmark_settings.SetField("iterations", 1)
        label_settings.SetField(
            "chromeos_image",
            "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin",
        )
        label_settings.SetField(
            "chrome_src", "/usr/local/google/home/chrome-top"
        )
        label_settings.SetField("autotest_path", "/tmp/autotest")

        mock_experiment_file.global_settings = global_settings
        mock_experiment_file.all_settings.append(label_settings)
        mock_experiment_file.all_settings.append(benchmark_settings)
        mock_experiment_file.all_settings.append(global_settings)

        mock_socket.return_value = ""

        # First test. General test.
        exp = ef.GetExperiment(mock_experiment_file, "", "")
        self.assertCountEqual(exp.remote, ["123.45.67.89", "123.45.76.80"])
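        # The default cache conditions come back as raw CacheConditions
        # integer values here; the later checks in this test compare them
        # symbolically.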
        self.assertEqual(exp.cache_conditions, [0, 2, 1])
        self.assertEqual(exp.log_level, "average")

        self.assertEqual(len(exp.benchmarks), 1)
        self.assertEqual(exp.benchmarks[0].name, "bench_test")
        self.assertEqual(exp.benchmarks[0].test_name, "kraken")
        self.assertEqual(exp.benchmarks[0].iterations, 1)
        self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf")
        self.assertFalse(exp.benchmarks[0].show_all_results)

        self.assertEqual(len(exp.labels), 1)
        self.assertEqual(
            exp.labels[0].chromeos_image,
            "chromeos/src/build/images/lumpy/latest/"
            "chromiumos_test_image.bin",
        )
        self.assertEqual(exp.labels[0].autotest_path, "/tmp/autotest")
        self.assertEqual(exp.labels[0].board, "lumpy")
        self.assertEqual(exp.machine_manager.keep_stateful, False)

        # Second test: Remotes listed in labels.
        test_flag.SetTestMode(True)
        label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
        # Also verify keep_stateful.
        global_settings.SetField("keep_stateful", "true")
        exp = ef.GetExperiment(mock_experiment_file, "", "")
        self.assertCountEqual(
            exp.remote,
            [
                "123.45.67.89",
                "123.45.76.80",
                "chromeos1.cros",
                "chromeos2.cros",
            ],
        )
        # keep_stateful is propagated to machine_manager which flashes the
        # images.
        self.assertEqual(exp.machine_manager.keep_stateful, True)

        # Third test: Automatic fixing of a bad logging_level param.
445        global_settings.SetField("logging_level", "really loud!")
446        exp = ef.GetExperiment(mock_experiment_file, "", "")
447        self.assertEqual(exp.log_level, "verbose")
448
449        # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
450        global_settings.SetField("rerun_if_failed", "true")
451        global_settings.SetField("rerun", "true")
452        global_settings.SetField("same_machine", "true")
453        global_settings.SetField("same_specs", "true")
454
455        self.assertRaises(
456            Exception, ef.GetExperiment, mock_experiment_file, "", ""
457        )
458        label_settings.SetField("remote", "")
459        global_settings.SetField("remote", "123.45.67.89")
460        exp = ef.GetExperiment(mock_experiment_file, "", "")
461        self.assertEqual(
462            exp.cache_conditions,
463            [
464                CacheConditions.CACHE_FILE_EXISTS,
465                CacheConditions.CHECKSUMS_MATCH,
466                CacheConditions.RUN_SUCCEEDED,
467                CacheConditions.FALSE,
468                CacheConditions.SAME_MACHINE_MATCH,
469                CacheConditions.MACHINES_MATCH,
470            ],
471        )
472
473        # Check the alias option to ignore cache.
474        global_settings.SetField("rerun", "false")
475        global_settings.SetField("ignore_cache", "true")
476        exp = ef.GetExperiment(mock_experiment_file, "", "")
477        self.assertEqual(
478            exp.cache_conditions,
479            [
480                CacheConditions.CACHE_FILE_EXISTS,
481                CacheConditions.CHECKSUMS_MATCH,
482                CacheConditions.RUN_SUCCEEDED,
483                CacheConditions.FALSE,
484                CacheConditions.SAME_MACHINE_MATCH,
485                CacheConditions.MACHINES_MATCH,
486            ],
487        )
488        # Check without cache use.
489        global_settings.SetField("rerun", "false")
490        global_settings.SetField("ignore_cache", "false")
491        exp = ef.GetExperiment(mock_experiment_file, "", "")
492        self.assertEqual(
493            exp.cache_conditions,
494            [
495                CacheConditions.CACHE_FILE_EXISTS,
496                CacheConditions.CHECKSUMS_MATCH,
497                CacheConditions.RUN_SUCCEEDED,
498                CacheConditions.SAME_MACHINE_MATCH,
499                CacheConditions.MACHINES_MATCH,
500            ],
501        )

        # Fifth test: Adding a second label; calling GetXbuddyPath; omitting all
        # remotes (so GetDefaultRemotes is called).
505        mock_socket.return_value = "test.corp.google.com"
506        global_settings.SetField("remote", "")
507        global_settings.SetField("same_machine", "false")
508
509        label_settings_2 = settings_factory.LabelSettings(
510            "official_image_label"
511        )
512        label_settings_2.SetField("chromeos_root", "chromeos")
513        label_settings_2.SetField("build", "official-dev")
514        label_settings_2.SetField("autotest_path", "")
515        label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
516
517        mock_experiment_file.all_settings.append(label_settings_2)
518        exp = ef.GetExperiment(mock_experiment_file, "", "")
519        self.assertEqual(len(exp.labels), 2)
520        self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path")
521        self.assertEqual(exp.labels[1].autotest_path, "fake_autotest_path")
522        self.assertCountEqual(
523            exp.remote,
524            ["fake_chromeos_machine1.cros", "fake_chromeos_machine2.cros"],
525        )

    def test_get_default_remotes(self):
        board_list = [
            "bob",
            "chell",
            "coral",
            "elm",
            "nautilus",
            "snappy",
        ]

        ef = ExperimentFactory()
        self.assertRaises(Exception, ef.GetDefaultRemotes, "bad-board")

        # Verify that we have entries for every board
        for b in board_list:
            remotes = ef.GetDefaultRemotes(b)
            self.assertGreaterEqual(len(remotes), 1)

    @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
    @mock.patch.object(os.path, "exists")
    def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
        ef = ExperimentFactory()
        chromeos_root = "/tmp/chromeos"
        log_level = "average"

        mock_exists.return_value = True
        ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
        self.assertTrue(ret)

        mock_exists.return_value = False
        mock_runcmd.return_value = 1
        with self.assertRaises(RuntimeError) as err:
            ef.CheckCrosfleetTool(chromeos_root, log_level)
        self.assertEqual(mock_runcmd.call_count, 1)
        self.assertEqual(
            str(err.exception),
            "Crosfleet tool not installed "
            "correctly, please try to manually install it from "
            "/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools",
        )

        mock_runcmd.return_value = 0
        mock_runcmd.call_count = 0
        ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
        self.assertEqual(mock_runcmd.call_count, 1)
        self.assertFalse(ret)


if __name__ == "__main__":
    FileUtils.Configure(True)
    test_flag.SetTestMode(True)
    unittest.main()