/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

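/* Tries to strip a useless deref_cast from one operand of a memcpy_deref so
 * that later passes see the original typed deref instead of the cast.
 *
 * Rough sketch of the intent (illustrative, not actual NIR syntax):
 *
 *    p = deref_var &tmp              (function_temp, SomeStruct)
 *    c = deref_cast (uint8_t *)p
 *    memcpy_deref dst, c, 32
 *
 * becomes
 *
 *    memcpy_deref dst, p, 32
 */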
static bool
opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
{
   assert(cpy->intrinsic == nir_intrinsic_memcpy_deref);

   nir_deref_instr *cast = nir_src_as_deref(*deref_src);
   if (cast == NULL || cast->deref_type != nir_deref_type_cast)
      return false;

   /* We always have to replace the source with a deref, not a bare uint
    * pointer.  If it's the first deref in the chain, bail.
    */
   nir_deref_instr *parent = nir_src_as_deref(cast->parent);
   if (parent == NULL)
      return false;

   /* If it has useful alignment information, we want to keep that */
   if (cast->cast.align_mul > 0)
      return false;

   /* Casts to uint8 or int8 never do us any good; get rid of them */
   if (cast->type == glsl_int8_t_type() ||
       cast->type == glsl_uint8_t_type()) {
      nir_src_rewrite(deref_src, &parent->def);
      return true;
   }

   int64_t parent_type_size = glsl_get_explicit_size(parent->type, false);
   if (parent_type_size < 0)
      return false;

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   /* We don't want to get rid of the cast if the resulting type would be
    * smaller than the amount of data we're copying.
    */
   if (nir_src_as_uint(cpy->src[2]) < (uint64_t)parent_type_size)
      return false;

   nir_src_rewrite(deref_src, &parent->def);
   return true;
}

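/* Returns true if `type` has an explicit layout with no padding anywhere:
 * each struct field starts exactly where the previous one ended, every
 * array or matrix stride equals its element size, and vectors/scalars
 * carry no explicit stride.  On success, the total size in bytes is
 * written to *size_out (if non-NULL).
 *
 * Hypothetical layouts, for illustration only:
 *
 *    struct { float x @0; float y @4; }  -> tightly packed, size 8
 *    struct { float x @0; vec2  y @8; }  -> not packed (4 padding bytes)
 */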
static bool
type_is_tightly_packed(const struct glsl_type *type, unsigned *size_out)
{
   unsigned size = 0;
   if (glsl_type_is_struct_or_ifc(type)) {
      unsigned num_fields = glsl_get_length(type);
      for (unsigned i = 0; i < num_fields; i++) {
         const struct glsl_struct_field *field =
            glsl_get_struct_field_data(type, i);

         if (field->offset < 0 || field->offset != size)
            return false;

         unsigned field_size;
         if (!type_is_tightly_packed(field->type, &field_size))
            return false;

         size = field->offset + field_size;
      }
   } else if (glsl_type_is_array_or_matrix(type)) {
      if (glsl_type_is_unsized_array(type))
         return false;

      unsigned stride = glsl_get_explicit_stride(type);
      if (stride == 0)
         return false;

      const struct glsl_type *elem_type = glsl_get_array_element(type);

      unsigned elem_size;
      if (!type_is_tightly_packed(elem_type, &elem_size))
         return false;

      if (elem_size != stride)
         return false;

      size = stride * glsl_get_length(type);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      if (glsl_get_explicit_stride(type) > 0)
         return false;

      if (glsl_type_is_boolean(type))
         return false;

      size = glsl_get_explicit_size(type, false);
   }

   if (size_out)
      *size_out = size;
   return true;
}

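/* Tries to replace a memcpy_deref whose size is known with something
 * simpler: nothing at all (self-copy or zero-sized copy), a load/store
 * pair, or a copy_deref, possibly through a newly built deref_cast.
 * Returns true if the memcpy was removed.
 */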
static bool
try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
                 struct set *complex_vars)
{
   nir_deref_instr *dst = nir_src_as_deref(cpy->src[0]);
   nir_deref_instr *src = nir_src_as_deref(cpy->src[1]);

   /* A self-copy can always be eliminated */
   if (dst == src) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   uint64_t size = nir_src_as_uint(cpy->src[2]);
   if (size == 0) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

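   /* Fast path: if both ends are vectors or scalars whose explicit size
    * matches the copy size exactly, the memcpy is just a typed move.  For
    * example (illustrative), copying 16 bytes from a vec4 to a uvec4 turns
    * into a load_deref, a same-size vector bitcast, and a store_deref.
    */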
   if (glsl_type_is_vector_or_scalar(src->type) &&
       glsl_type_is_vector_or_scalar(dst->type) &&
       glsl_get_explicit_size(dst->type, false) == size &&
       glsl_get_explicit_size(src->type, false) == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_def *data =
         nir_load_deref_with_access(b, src, nir_intrinsic_src_access(cpy));
      data = nir_bitcast_vector(b, data, glsl_get_bit_size(dst->type));
      assert(data->num_components == glsl_get_vector_elements(dst->type));
      nir_store_deref_with_access(b, dst, data, ~0 /* write mask */,
                                  nir_intrinsic_dst_access(cpy));
      return true;
   }

   unsigned type_size;
   if (dst->type == src->type &&
       type_is_tightly_packed(dst->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   /* If one of the two types is tightly packed and happens to equal the
    * memcpy size, then we can lower the memcpy to a deref copy by casting
    * the other side to that type.
    *
    * However, if we blindly apply this logic, we may end up with extra casts
    * where we don't want them.  The whole point of converting memcpy to
    * copy_deref is in the hope that nir_opt_copy_prop_vars or
    * nir_lower_vars_to_ssa will get rid of the copy, and those passes don't
    * handle casts well.  Heuristically, only do this optimization if the
    * tightly packed type is on a deref with nir_var_function_temp so we stick
    * the cast on the other mode.
    */
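   /* Illustrative sketch (not actual NIR syntax): copying 32 bytes from an
    * SSBO deref into a tightly packed 32-byte function_temp struct becomes
    *
    *    src' = deref_cast (StructTy *)src    <- cast lands on the SSBO side
    *    copy_deref dst, src'
    *
    * leaving the function_temp deref cast-free for the copy-propagation
    * passes mentioned above.
    */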
   if (dst->modes == nir_var_function_temp &&
       type_is_tightly_packed(dst->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      src = nir_build_deref_cast(b, &src->def,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   /* If we can get at the variable AND the only complex use of that variable
    * is as a memcpy destination, then we don't have to care about any empty
    * space in the variable.  In particular, we know that the variable is never
    * cast to any other type and it's never used as a memcpy source so nothing
    * can see any padding bytes.  This holds even if some other memcpy only
    * writes to part of the variable.
    */
   if (dst->deref_type == nir_deref_type_var &&
       dst->modes == nir_var_function_temp &&
       _mesa_set_search(complex_vars, dst->var) == NULL &&
       glsl_get_explicit_size(dst->type, false) <= size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      src = nir_build_deref_cast(b, &src->def,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   if (src->modes == nir_var_function_temp &&
       type_is_tightly_packed(src->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      dst = nir_build_deref_cast(b, &dst->def,
                                 dst->modes, src->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   return false;
}

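/* Runs the memcpy optimizations on a single function implementation using
 * two walks over the blocks: one to gather variables with complex uses and
 * one to rewrite the memcpy_deref intrinsics themselves.
 */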
static bool
opt_memcpy_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_builder b = nir_builder_create(impl);

   struct set *complex_vars = _mesa_pointer_set_create(NULL);

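   /* Pass 1: record every local variable with a "complex" use other than
    * being a memcpy destination (e.g. its deref escapes through a cast).
    * Padding in such variables may be observable, so try_lower_memcpy has
    * to be conservative with them.
    */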
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (deref->deref_type != nir_deref_type_var)
            continue;

         nir_deref_instr_has_complex_use_options opts =
            nir_deref_instr_has_complex_use_allow_memcpy_dst;
         if (nir_deref_instr_has_complex_use(deref, opts))
            _mesa_set_add(complex_vars, deref->var);
      }
   }

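   /* Pass 2: for each memcpy_deref, repeatedly strip useless casts from
    * both operands, then try to lower the copy itself.
    */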
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *cpy = nir_instr_as_intrinsic(instr);
         if (cpy->intrinsic != nir_intrinsic_memcpy_deref)
            continue;

         while (opt_memcpy_deref_cast(cpy, &cpy->src[0]))
            progress = true;
         while (opt_memcpy_deref_cast(cpy, &cpy->src[1]))
            progress = true;

         if (try_lower_memcpy(&b, cpy, complex_vars)) {
            progress = true;
            continue;
         }
      }
   }

   _mesa_set_destroy(complex_vars, NULL);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

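/* Optimizes and lowers memcpy_deref intrinsics across the whole shader.
 *
 * A typical call site in a driver's optimization loop might look like the
 * following (placement in the pass list is up to the driver):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, nir, nir_opt_memcpy);
 */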
bool
nir_opt_memcpy(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_memcpy_impl(impl))
         progress = true;
   }

   return progress;
}