xref: /aosp_15_r20/external/bazelbuild-rules_testing/lib/private/analysis_test.bzl (revision d605057434dcabba796c020773aab68d9790ff9f)
1# Copyright 2022 The Bazel Authors. All rights reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#    http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""# Analysis test
16
17Support for testing analysis phase logic, such as rules.
18"""
19
20load("@bazel_skylib//lib:dicts.bzl", "dicts")
21load("//lib:truth.bzl", "truth")
22load("//lib:util.bzl", "recursive_testing_aspect", "testing_aspect")
23load("//lib/private:util.bzl", "get_test_name_from_function")
24
def _fail(env, msg):
    """Records an unconditional failure for the current test.

    Args:
      env: The internal failure-collecting environment built by
          `_begin_analysis_test`; must expose `ctx` and `failures`.
      msg: The message describing the failure.
    """
    prefixed = "In test %s: %s" % (env.ctx.attr._impl_name, msg)

    # Starlark has no logging facility, so print is the only way to surface
    # the message immediately.
    # buildifier: disable=print
    print(prefixed)
    env.failures.append(prefixed)
38
def _begin_analysis_test(ctx):
    """Initializes an analysis test and returns its environment.

    This must be the first call in a test implementation function. It builds
    the "test environment" through which assertion failures are collected so
    they can all be reported when the test ends.

    Args:
      ctx: The Starlark rule context. Pass the implementation function's
          `ctx` argument through unchanged.

    Returns:
        Tuple of (env, target under test). The env struct has these public
        fields:
          * ctx: the underlying rule ctx
          * expect: a truth Expect object (see truth.bzl).
          * fail: A function to register failures for later reporting.

        All other fields are private implementation details and may change
        at any time; do not rely on them.
    """
    subject = getattr(ctx.attr, "target")

    # With a configuration transition on the attribute, the target comes
    # through as a single-element list; unwrap it.
    if type(subject) == type([]):
        subject = subject[0]

    failure_messages = []
    collector = struct(
        ctx = ctx,
        failures = failure_messages,
    )
    record_failure = lambda msg: _fail(collector, msg)
    truth_adapter = struct(
        ctx = ctx,
        fail = record_failure,
    )
    env = struct(
        ctx = ctx,
        # Visibility: package; only exposed so that our own tests can verify
        # failure behavior.
        _failures = failure_messages,
        fail = record_failure,
        expect = truth.expect(truth_adapter),
    )
    return env, subject
80
def _end_analysis_test(env):
    """Finalizes an analysis test and reports the collected results.

    An analysis test implementation function must call this at the end and
    return its result so the outcome is registered.

    Args:
      env: The test environment produced by `_begin_analysis_test`.

    Returns:
      A list with the provider needed to automatically register the analysis
      test result.
    """
    collected = env._failures
    return [AnalysisTestResultInfo(
        # An empty failure list means every assertion passed.
        success = not collected,
        message = "\n".join(collected),
    )]
97
def analysis_test(
        name,
        target,
        impl,
        expect_failure = False,
        attrs = {},
        attr_values = {},
        fragments = [],
        config_settings = {},
        extra_target_under_test_aspects = [],
        collect_actions_recursively = False):
    """Creates an analysis test from its implementation function.

    An analysis test verifies the behavior of a "real" rule target by examining
    and asserting on the providers given by the real target.

    Each analysis test is defined in an implementation function. This function
    handles the boilerplate to create and return a test target and captures the
    implementation function's name so that it can be printed in test feedback.

    An example of an analysis test:

    ```
    def basic_test(name):
        my_rule(name = name + "_subject", ...)

        analysis_test(name = name, target = name + "_subject", impl = _your_test)

    def _your_test(env, target):
        # Make assertions via env.expect, a truth Expect object (see
        # truth.bzl), or register failures directly with env.fail(...).
        ...
    ```

    Args:
      name: Name of the target. It should be a Starlark identifier, matching
          pattern '[A-Za-z_][A-Za-z0-9_]*'.
      target: The target to test.
      impl: The implementation function of the analysis test; called with
          `(env, target)`.
      expect_failure: If true, the analysis test will expect the target
          to fail. Assertions can be made on the underlying failure using
          truth.expect_failure
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.
      attr_values: An optional dictionary of kwargs to pass onto the
          analysis test target itself (e.g. common attributes like `tags`,
          `target_compatible_with`, or attributes from `attrs`). Note that
          these are for the analysis test target itself, not the target under
          test.
      fragments: An optional list of fragment names that can be used to give
          rules access to language-specific parts of configuration.
      config_settings: A dictionary of configuration settings to change for
          the target under test and its dependencies. This may be used to
          essentially change 'build flags' for the target under test, and may
          thus be utilized to test multiple targets with different flags in a
          single build. NOTE: When values that are labels (e.g. for the
          --platforms flag), it's suggested to always explicitly call
          `Label()` on the value before passing it in. This ensures the label
          is resolved in your repository's context, not rule_testing's.
      extra_target_under_test_aspects: An optional list of aspects to apply
          to the target_under_test in addition to those set up by default for
          the test harness itself.
      collect_actions_recursively: If true, runs testing_aspect over all
          attributes, otherwise it is only applied to the target under test.

    Returns:
        (None)
    """

    # Copy before modifying: the caller's dict must not be touched.
    test_attrs = dict(attrs)
    test_attrs["_impl_name"] = attr.string(default = get_test_name_from_function(impl))

    settings = dict(config_settings)
    if expect_failure:
        settings["//command_line_option:allow_analysis_failures"] = "True"

    # Only attach a transition when there is actually something to change.
    label_kwargs = {}
    if settings:
        label_kwargs["cfg"] = analysis_test_transition(
            settings = settings,
        )

    if collect_actions_recursively:
        harness_aspect = recursive_testing_aspect
    else:
        harness_aspect = testing_aspect

    test_attrs["target"] = attr.label(
        aspects = [harness_aspect] + extra_target_under_test_aspects,
        mandatory = True,
        **label_kwargs
    )

    def run_test_impl(ctx):
        env, target_under_test = _begin_analysis_test(ctx)
        impl(env, target_under_test)
        return _end_analysis_test(env)

    return testing.analysis_test(
        name,
        run_test_impl,
        attrs = test_attrs,
        fragments = fragments,
        attr_values = dicts.add(attr_values, {"target": target}),
    )
194