/*
 * Copyright (C) 2020-2021 Collabora, Ltd.
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
static bool
lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
   bool *lower_plain_stores = data;
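
   /* Only intrinsics with side effects on memory need to be predicated:
    * atomics are always handled, plain stores only when the caller asks for
    * it via lower_plain_stores.
    */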
   switch (intr->intrinsic) {
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap:
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
      break;
   case nir_intrinsic_store_global:
   case nir_intrinsic_image_store:
   case nir_intrinsic_bindless_image_store:
      if (!(*lower_plain_stores))
         return false;
      else
         break;
   default:
      return false;
   }

   b->cursor = nir_before_instr(&intr->instr);
   bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
   nir_def *undef = NULL;
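
   /* Wrap the intrinsic in "if (!gl_HelperInvocation)": pull it out of its
    * current position and re-insert it inside the then-block so it only
    * executes for non-helper invocations.
    */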
   nir_def *helper = nir_load_helper_invocation(b, 1);
   nir_push_if(b, nir_inot(b, helper));
   nir_instr_remove(&intr->instr);
   nir_builder_instr_insert(b, &intr->instr);

   /* Per the spec, it does not matter what we return for helper threads.
    * Represent this by an ssa_undef in the hopes the backend will be clever
    * enough to optimize out the phi.
    *
    * Fragment shader helper invocations execute the same shader code as
    * non-helper invocations, but will not have side effects that modify the
    * framebuffer or other shader-accessible memory. In particular:
    *
    * ...
    *
    * Atomic operations to image, buffer, or atomic counter variables
    * performed by helper invocations have no effect on the underlying
    * image or buffer memory. The values returned by such atomic
    * operations are undefined.
    */
   if (has_dest) {
      nir_push_else(b, NULL);
      undef = nir_undef(b, intr->def.num_components,
                        intr->def.bit_size);
   }

   nir_pop_if(b, NULL);
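
   /* If the intrinsic returns a value (atomics), merge the real result with
    * the undef from the else-branch and point every user at the phi.
    */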
   if (has_dest) {
      nir_def *phi = nir_if_phi(b, &intr->def, undef);

      /* We can't use nir_def_rewrite_uses_after on phis, so use the global
       * version and fix up the phi manually.
       */
      nir_def_rewrite_uses(&intr->def, phi);

      nir_instr *phi_instr = phi->parent_instr;
      nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
      nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
                                                        intr->instr.block);
      nir_src_rewrite(&phi_src->src, &intr->def);
   }

   return true;
}
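
/*
 * Lower stores and atomics in fragment shaders so that helper invocations
 * have no observable side effects, by predicating each such intrinsic on
 * !gl_HelperInvocation. As a rough sketch (not the exact NIR),
 *
 *    result = image_atomic ...
 *
 * becomes
 *
 *    if (!load_helper_invocation()) {
 *       result = image_atomic ...
 *    } else {
 *       result = undef
 *    }
 *    result = phi(result, undef)
 *
 * Plain (non-atomic) stores are only wrapped when the caller sets
 * lower_plain_stores.
 */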
bool
nir_lower_helper_writes(nir_shader *shader, bool lower_plain_stores)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);
   return nir_shader_intrinsics_pass(shader, lower, nir_metadata_none,
                                     &lower_plain_stores);
}