xref: /aosp_15_r20/external/bazel-skylib/lib/unittest.bzl (revision bcb5dc7965af6ee42bf2f21341a2ec00233a8c8a)
1# Copyright 2017 The Bazel Authors. All rights reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#    http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""Unit testing support.
16
17Unlike most Skylib files, this exports four modules:
18* `unittest` contains functions to declare and define unit tests for ordinary
19   Starlark functions;
20* `analysistest` contains functions to declare and define tests for analysis
21   phase behavior of a rule, such as a given target's providers or registered
22   actions;
23* `loadingtest` contains functions to declare and define tests for loading
24   phase behavior, such as macros and `native.*`;
25* `asserts` contains the assertions used within tests.
26
27See https://bazel.build/extending/concepts for background about macros, rules,
28and the different phases of a build.
29"""
30
31load(":new_sets.bzl", new_sets = "sets")
32load(":partial.bzl", "partial")
33load(":types.bzl", "types")
34
# The following function should only be called from WORKSPACE files and workspace macros.
# buildifier: disable=unnamed-macro
def register_unittest_toolchains():
    """Registers the script-generating toolchains required by unittest rules."""
    unittest_toolchains = [
        "@bazel_skylib//toolchains/unittest:cmd_toolchain",
        "@bazel_skylib//toolchains/unittest:bash_toolchain",
    ]
    native.register_toolchains(*unittest_toolchains)
43
# Toolchain type required by every test rule in this file; resolves to a
# platform-specific script-generating toolchain (cmd or bash).
TOOLCHAIN_TYPE = "@bazel_skylib//toolchains/unittest:toolchain_type"
45
_UnittestToolchainInfo = provider(
    doc = "Execution platform information for rules in the bazel_skylib repository.",
    fields = [
        "file_ext",  # File extension of the generated test script, including leading dot.
        "success_templ",  # Script content emitted verbatim when the test passes.
        "failure_templ",  # Script template with one `%s` slot for the joined failure message.
        "join_on",  # Separator used to join failure-message lines into the template.
        "escape_chars_with",  # Dict mapping a character to the escape prefix it needs.
        "escape_other_chars_with",  # Prefix for characters not in `escape_chars_with` ("" = none).
    ],
)
57
def _unittest_toolchain_impl(ctx):
    # Bundle the attribute values into the internal provider that the test
    # rules consume via `ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info`.
    info = _UnittestToolchainInfo(
        file_ext = ctx.attr.file_ext,
        success_templ = ctx.attr.success_templ,
        failure_templ = ctx.attr.failure_templ,
        join_on = ctx.attr.join_on,
        escape_chars_with = ctx.attr.escape_chars_with,
        escape_other_chars_with = ctx.attr.escape_other_chars_with,
    )
    return [platform_common.ToolchainInfo(unittest_toolchain_info = info)]
71
unittest_toolchain = rule(
    implementation = _unittest_toolchain_impl,
    doc = "Declares a platform-specific script-generating toolchain for unittest rules.",
    attrs = {
        "failure_templ": attr.string(
            mandatory = True,
            doc = (
                "Test script template with a single `%s`. That " +
                "placeholder is replaced with the lines in the " +
                "failure message joined with the string " +
                # Fixed: the joining separator attribute is `join_on`, not
                # the nonexistent `join_with`.
                "specified in `join_on`. The resulting script " +
                "should print the failure message and exit with " +
                "non-zero status."
            ),
        ),
        "file_ext": attr.string(
            mandatory = True,
            doc = (
                "File extension for test script, including leading dot."
            ),
        ),
        "join_on": attr.string(
            mandatory = True,
            doc = (
                "String used to join the lines in the failure " +
                "message before including the resulting string " +
                "in the script specified in `failure_templ`."
            ),
        ),
        "success_templ": attr.string(
            mandatory = True,
            doc = (
                "Test script generated when the test passes. " +
                "Should exit with status 0."
            ),
        ),
        "escape_chars_with": attr.string_dict(
            doc = (
                "Dictionary of characters that need escaping in " +
                "test failure message to prefix appended to escape " +
                "those characters. For example, " +
                '`{"%": "%", ">": "^"}` would replace `%` with ' +
                "`%%` and `>` with `^>` in the failure message " +
                # Fixed: escaped failure text is substituted into
                # `failure_templ` (see `_end`), not `success_templ`.
                "before that is included in `failure_templ`."
            ),
        ),
        "escape_other_chars_with": attr.string(
            default = "",
            doc = (
                "String to prefix every character in test failure " +
                "message which is not a key in `escape_chars_with` " +
                "before including that in `failure_templ`. For " +
                # Fixed: the backslash must itself be escaped so the doc
                # renders as `"\"` rather than `""`.
                'example, `"\\"` would prefix every character in ' +
                "the failure message (except those in the keys of " +
                "`escape_chars_with`) with `\\`."
            ),
        ),
    },
)
130
def _impl_function_name(impl):
    """Extracts the name of a rule implementation function.

    Used to give better feedback in test failure messages.

    Args:
      impl: the rule implementation function

    Returns:
      The name of the given function
    """

    # Starlark renders a function as "<function NAME>"; slice NAME out of
    # that representation. If the stringification ever changes, this must
    # be updated to match.
    # TODO(bazel-team): Expose a ._name field on functions to avoid this.
    stringified = str(impl)
    after_prefix = stringified.partition("<function ")[2]
    return after_prefix.rpartition(">")[0]
150
def _make(impl, attrs = {}, doc = "", toolchains = []):
    """Wraps a unit-test implementation function in a test rule.

    Each unit test lives in an implementation function that must be attached
    to a rule before a target can be built from it. This helper performs that
    boilerplate and records the implementation function's name so it can be
    shown in test feedback.

    Use `attrs` to declare dependencies the test needs, and `toolchains` to
    declare extra toolchain requirements.

    An example of a unit test:

    ```
    def _your_test(ctx):
      env = unittest.begin(ctx)

      # Assert statements go here

      return unittest.end(env)

    your_test = unittest.make(_your_test)
    ```

    Recall that names of test rules must end in `_test`.

    Args:
      impl: The implementation function of the unit test.
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.
      doc: A description of the rule that can be extracted by documentation generating tools.
      toolchains: An optional list to supplement the toolchains passed to
          the unit test's `rule()` constructor.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
    """
    rule_attrs = dict(attrs)

    # Record the implementation function's name for use in failure messages.
    rule_attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))

    return rule(
        impl,
        doc = doc,
        attrs = rule_attrs,
        _skylark_testable = True,
        test = True,
        toolchains = toolchains + [TOOLCHAIN_TYPE],
    )
203
# Provider attached to the target under test by `_action_retrieving_aspect`,
# capturing its registered actions and the bin-dir path for later assertions.
_ActionInfo = provider(
    doc = "Information relating to the target under test.",
    fields = ["actions", "bin_path"],
)
208
def _action_retrieving_aspect_impl(target, ctx):
    # Snapshot the target's registered actions and output root so the test
    # implementation can inspect them after analysis.
    info = _ActionInfo(
        actions = target.actions,
        bin_path = ctx.bin_dir.path,
    )
    return [info]
216
# Aspect applied to `target_under_test`; it does not propagate to any
# dependency attributes, it only captures the target's own actions.
_action_retrieving_aspect = aspect(
    attr_aspects = [],
    implementation = _action_retrieving_aspect_impl,
)
221
# TODO(cparsons): Provide more full documentation on analysis testing in README.
def _make_analysis_test(
        impl,
        expect_failure = False,
        attrs = {},
        fragments = [],
        config_settings = {},
        extra_target_under_test_aspects = [],
        doc = ""):
    """Wraps an analysis-test implementation function in a test rule.

    An analysis test checks the behavior of a "real" rule target by examining
    and asserting on the providers given by the real target.

    As with `unittest.make`, the implementation function must be attached to a
    rule before targets can be built from it; this helper performs that
    boilerplate and records the function's name for test feedback.

    An example of an analysis test:

    ```
    def _your_test(ctx):
      env = analysistest.begin(ctx)

      # Assert statements go here

      return analysistest.end(env)

    your_test = analysistest.make(_your_test)
    ```

    Recall that names of test rules must end in `_test`.

    Args:
      impl: The implementation function of the unit test.
      expect_failure: If true, the analysis test will expect the target_under_test
          to fail. Assertions can be made on the underlying failure using asserts.expect_failure
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.
      fragments: An optional list of fragment names that can be used to give rules access to
          language-specific parts of configuration.
      config_settings: A dictionary of configuration settings to change for the target under
          test and its dependencies. This may be used to essentially change 'build flags' for
          the target under test, and may thus be utilized to test multiple targets with different
          flags in a single build
      extra_target_under_test_aspects: An optional list of aspects to apply to the target_under_test
          in addition to those set up by default for the test harness itself.
      doc: A description of the rule that can be extracted by documentation generating tools.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
    """
    rule_attrs = dict(attrs)
    rule_attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))

    settings = dict(config_settings)
    if expect_failure:
        # The build must tolerate analysis failures so the test can inspect
        # them instead of aborting.
        settings["//command_line_option:allow_analysis_failures"] = "True"

    tut_kwargs = {}
    if settings:
        # Apply the requested configuration to the target under test (and its
        # transitive dependencies) via an analysis-test transition.
        tut_kwargs["cfg"] = analysis_test_transition(settings = settings)

    rule_attrs["target_under_test"] = attr.label(
        aspects = [_action_retrieving_aspect] + extra_target_under_test_aspects,
        mandatory = True,
        **tut_kwargs
    )

    return rule(
        impl,
        doc = doc,
        attrs = rule_attrs,
        fragments = fragments,
        test = True,
        toolchains = [TOOLCHAIN_TYPE],
        analysis_test = True,
    )
305
def _suite(name, *test_rules):
    """Defines a `test_suite` target that contains multiple tests.

    After defining your test rules in a `.bzl` file, you need to create targets
    from those rules so that `blaze test` can execute them. Rather than listing
    each test in a BUILD file by hand, write a macro in your `.bzl` file that
    instantiates all targets, and call that macro from your BUILD file so you
    only have to load one symbol.

    This function creates the individual targets and wraps them in a single
    `test_suite` target. A test rule that needs no arguments can be listed
    directly; to supply attributes explicitly, wrap it with `partial.make()`.
    For instance, in your `.bzl` file, you could write:

    ```
    def your_test_suite():
      unittest.suite(
          "your_test_suite",
          your_test,
          your_other_test,
          partial.make(yet_another_test, timeout = "short"),
      )
    ```

    Then, in your `BUILD` file, simply load the macro and invoke it to have all
    of the targets created:

    ```
    load("//path/to/your/package:tests.bzl", "your_test_suite")
    your_test_suite()
    ```

    If you pass _N_ unit test rules to `unittest.suite`, _N_ + 1 targets will be
    created: a `test_suite` target named `${name}` (where `${name}` is the name
    argument passed in here) and targets named `${name}_test_${i}`, where `${i}`
    is the index of the test in the `test_rules` list, which is used to uniquely
    name each target.

    Args:
      name: The name of the `test_suite` target, and the prefix of all the test
          target names.
      *test_rules: A list of test rules defined by `unittest.test`.
    """
    created_names = []
    for index, rule_or_partial in enumerate(test_rules):
        target_name = "%s_test_%d" % (name, index)

        # A partial carries pre-bound attributes; call it with just the name.
        if partial.is_instance(rule_or_partial):
            partial.call(rule_or_partial, name = target_name)
        else:
            rule_or_partial(name = target_name)
        created_names.append(target_name)

    native.test_suite(
        name = name,
        tests = [":" + t for t in created_names],
    )
363
def _begin(ctx):
    """Initializes the environment for a unit test.

    Must be the first call in a unit test implementation function. The
    returned environment collects assertion failures so they can be reported
    and logged by `unittest.end`.

    Args:
      ctx: The Starlark context. Pass the implementation function's `ctx` argument
          in verbatim.

    Returns:
      A test environment struct that must be passed to assertions and finally to
      `unittest.end`. Treat its fields as opaque; they may change.
    """

    # `failures` is mutated in place by the assertion helpers via `_fail`.
    return struct(failures = [], ctx = ctx)
382
def _begin_analysis_test(ctx):
    """Initializes the environment for an analysis test.

    Must be the first call in an analysis test implementation function. The
    returned environment collects assertion failures so they can be reported
    and logged by `analysistest.end`.

    Args:
      ctx: The Starlark context. Pass the implementation function's `ctx` argument
          in verbatim.

    Returns:
      A test environment struct that must be passed to assertions and finally to
      `analysistest.end`. Treat its fields as opaque; they may change.
    """

    # Same shape as the unittest environment; `failures` is mutated in place.
    return struct(failures = [], ctx = ctx)
401
def _end_analysis_test(env):
    """Concludes an analysis test and reports its results.

    This must be called and returned at the end of an analysis test
    implementation function so that the results are reported.

    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      A list of providers needed to automatically register the analysis test result.
    """
    collected_failures = env.failures
    return [AnalysisTestResultInfo(
        success = len(collected_failures) == 0,
        message = "\n".join(collected_failures),
    )]
418
def _end(env):
    """Concludes a unit test and writes its result script.

    This must be called and returned at the end of a unit test implementation
    function so that the results are reported.

    Args:
      env: The test environment returned by `unittest.begin`.

    Returns:
      A list of providers needed to automatically register the test result.
    """

    tc = env.ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
    testbin = env.ctx.actions.declare_file(env.ctx.label.name + tc.file_ext)

    if env.failures:
        # Escape every character of the failure message according to the
        # toolchain's rules, then splice the joined lines into the failure
        # script template.
        escaped_lines = []
        for line in "\n".join(env.failures).split("\n"):
            escaped = ""
            for ch in line.elems():
                escaped += tc.escape_chars_with.get(ch, tc.escape_other_chars_with) + ch
            escaped_lines.append(escaped)
        cmd = tc.failure_templ % tc.join_on.join(escaped_lines)
    else:
        cmd = tc.success_templ

    env.ctx.actions.write(
        output = testbin,
        content = cmd,
        is_executable = True,
    )
    return [DefaultInfo(executable = testbin)]
453
def _fail(env, msg):
    """Unconditionally records a failure for the current test.

    Args:
      env: The test environment returned by `unittest.begin`.
      msg: The message to log describing the failure.
    """
    failure = "In test %s: %s" % (env.ctx.attr._impl_name, msg)

    # Starlark offers no better channel for immediate output, so use print.
    # buildifier: disable=print
    print(failure)
    env.failures.append(failure)
467
def _assert_true(
        env,
        condition,
        msg = "Expected condition to be true, but was false."):
    """Asserts that the given `condition` is true.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if condition:
        return
    _fail(env, msg)
482
def _assert_false(
        env,
        condition,
        msg = "Expected condition to be false, but was true."):
    """Asserts that the given `condition` is false.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if not condition:
        return
    _fail(env, msg)
497
def _assert_equals(env, expected, actual, msg = None):
    """Asserts that the given `expected` and `actual` values are equal.

    Args:
      env: The test environment returned by `unittest.begin`.
      expected: The expected value of some computation.
      actual: The actual value returned by some computation.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if expected == actual:
        return

    details = 'Expected "%s", but got "%s"' % (expected, actual)

    # Prefix the caller's message, if any, onto the mismatch details.
    _fail(env, "%s (%s)" % (msg, details) if msg else details)
515
def _assert_set_equals(env, expected, actual, msg = None):
    """Asserts that the given `expected` and `actual` sets are equal.

    Args:
      env: The test environment returned by `unittest.begin`.
      expected: The expected set resulting from some computation.
      actual: The actual set returned by some computation.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if new_sets.is_equal(expected, actual):
        return

    # Report both directions of the difference so the mismatch is obvious.
    missing = new_sets.difference(expected, actual)
    unexpected = new_sets.difference(actual, expected)
    details = "Expected %s, but got %s" % (new_sets.str(expected), new_sets.str(actual))
    if new_sets.length(missing) > 0:
        details += ", missing are %s" % (new_sets.str(missing))
    if new_sets.length(unexpected) > 0:
        details += ", unexpected are %s" % (new_sets.str(unexpected))
    _fail(env, "%s (%s)" % (msg, details) if msg else details)

# Alias retained for callers using the `new_set_equals` name.
_assert_new_set_equals = _assert_set_equals
541
def _expect_failure(env, expected_failure_msg = ""):
    """Asserts that the target under test has failed with a given error message.

    This requires that the analysis test is created with `analysistest.make()`
    and `expect_failure = True` is specified.

    Args:
      env: The test environment returned by `analysistest.begin`.
      expected_failure_msg: The error message to expect as a result of analysis failures.
    """
    dep = _target_under_test(env)
    if AnalysisFailureInfo in dep:
        # Concatenate every failure cause so the expected substring can match
        # any of them.
        actual_errors = ""
        for cause in dep[AnalysisFailureInfo].causes.to_list():
            actual_errors += cause.message + "\n"
        if expected_failure_msg not in actual_errors:
            expectation_msg = "Expected errors to contain '%s' but did not. " % expected_failure_msg
            expectation_msg += "Actual errors:%s" % actual_errors
            _fail(env, expectation_msg)
    else:
        _fail(env, "Expected failure of target_under_test, but found success")
563
def _target_actions(env):
    """Returns a list of actions registered by the target under test.

    The actions are collected by `_action_retrieving_aspect`, which is applied
    to `target_under_test` by `analysistest.make`.

    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      A list of actions registered by the target under test
    """
    target = _target_under_test(env)
    return target[_ActionInfo].actions
576
def _target_bin_dir_path(env):
    """Returns ctx.bin_dir.path for the target under test.

    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      Output bin dir path string.
    """
    target = _target_under_test(env)
    return target[_ActionInfo].bin_path
587
def _target_under_test(env):
    """Returns the target under test.

    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      The target under test.
    """
    target = env.ctx.attr.target_under_test

    # With a split transition the attribute is a list; unwrap its sole entry.
    if not types.is_list(target):
        return target
    if not target:
        fail("test rule does not have a target_under_test")
    return target[0]
604
def _loading_test_impl(ctx):
    # An empty failure_message means the case passed; otherwise generate the
    # toolchain's failing script with the message substituted in.
    tc = ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
    if ctx.attr.failure_message:
        script = tc.failure_templ % ctx.attr.failure_message
    else:
        script = tc.success_templ

    testbin = ctx.actions.declare_file("loading_test_" + ctx.label.name + tc.file_ext)
    ctx.actions.write(
        output = testbin,
        content = script,
        is_executable = True,
    )
    return [DefaultInfo(executable = testbin)]
618
# Test rule backing each loading-phase test case: succeeds when
# `failure_message` is empty, otherwise prints the message and fails.
_loading_test = rule(
    implementation = _loading_test_impl,
    attrs = {
        "failure_message": attr.string(),
    },
    toolchains = [TOOLCHAIN_TYPE],
    test = True,
)
627
def _loading_make(name):
    """Creates a loading phase test environment and test_suite.

    Args:
       name: name of the suite of tests to create

    Returns:
       loading phase environment passed to other loadingtest functions
    """

    # The suite collects individual cases created later via this shared tag.
    native.test_suite(
        name = "%s_tests" % name,
        tags = ["%s_test_case" % name],
    )
    return struct(name = name)
642
def _loading_assert_equals(env, test_case, expected, actual):
    """Creates a test case asserting equality at LOADING phase.

    Args:
      env:       Loading test env created from loadingtest.make
      test_case: Name of the test case
      expected:  Expected value to test
      actual:    Actual value received.

    Returns:
      None, creates test case
    """

    # A None failure message makes the generated test pass.
    failure = None
    if expected != actual:
        failure = 'Expected "%s", but got "%s"' % (expected, actual)

    _loading_test(
        name = "%s_%s" % (env.name, test_case),
        failure_message = failure,
        tags = [env.name + "_test_case"],
    )
665
# Public assertion API, shared by unit tests and analysis tests.
asserts = struct(
    expect_failure = _expect_failure,
    equals = _assert_equals,
    false = _assert_false,
    set_equals = _assert_set_equals,
    new_set_equals = _assert_new_set_equals,
    true = _assert_true,
)
674
# Public unit-test API: declare rules with `make`, instantiate targets with
# `suite`, and bracket implementation functions with `begin`/`end`.
unittest = struct(
    make = _make,
    suite = _suite,
    begin = _begin,
    end = _end,
    fail = _fail,
)
682
# Public analysis-test API: like `unittest`, plus accessors for the target
# under test, its registered actions, and its bin-dir path.
analysistest = struct(
    make = _make_analysis_test,
    begin = _begin_analysis_test,
    end = _end_analysis_test,
    fail = _fail,
    target_actions = _target_actions,
    target_bin_dir_path = _target_bin_dir_path,
    target_under_test = _target_under_test,
)
692
# Public loading-phase test API: `make` creates the suite environment,
# `equals` adds an equality test case to it.
loadingtest = struct(
    make = _loading_make,
    equals = _loading_assert_equals,
)
697