1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_control_flow.h"
27 #include "nir_worklist.h"
28
29 static bool
nir_texop_implies_derivative(nir_texop op)30 nir_texop_implies_derivative(nir_texop op)
31 {
32 return op == nir_texop_tex ||
33 op == nir_texop_txb ||
34 op == nir_texop_lod;
35 }
36 #define MOVE_INSTR_FLAG 1
37 #define STOP_PROCESSING_INSTR_FLAG 2
38
39 /** Check recursively if the source can be moved to the top of the shader.
40 * Sets instr->pass_flags to MOVE_INSTR_FLAG and adds the instr
41 * to the given worklist
42 */
43 static bool
can_move_src(nir_src * src,void * worklist)44 can_move_src(nir_src *src, void *worklist)
45 {
46 nir_instr *instr = src->ssa->parent_instr;
47 if (instr->pass_flags)
48 return true;
49
50 /* Phi instructions can't be moved at all. Also, if we're dependent on
51 * a phi then we are dependent on some other bit of control flow and
52 * it's hard to figure out the proper condition.
53 */
54 if (instr->type == nir_instr_type_phi)
55 return false;
56
57 if (instr->type == nir_instr_type_intrinsic) {
58 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
59 if (intrin->intrinsic == nir_intrinsic_load_deref) {
60 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
61 if (!nir_deref_mode_is_one_of(deref, nir_var_read_only_modes))
62 return false;
63 } else if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
64 NIR_INTRINSIC_CAN_REORDER)) {
65 return false;
66 }
67 }
68
69 /* set pass_flags and remember the instruction for potential cleanup */
70 instr->pass_flags = MOVE_INSTR_FLAG;
71 nir_instr_worklist_push_tail(worklist, instr);
72
73 if (!nir_foreach_src(instr, can_move_src, worklist)) {
74 return false;
75 }
76 return true;
77 }
78
79 /** Try to mark a discard or demote instruction for moving
80 *
81 * This function does two things. One is that it searches through the
82 * dependency chain to see if this discard is an instruction that we can move
83 * up to the top. Second, if the discard is one we can move, it tags the
84 * discard and its dependencies (using pass_flags = 1).
85 * Demote are handled the same way, except that they can still be moved up
86 * when implicit derivatives are used.
87 */
88 static bool
try_move_discard(nir_intrinsic_instr * discard)89 try_move_discard(nir_intrinsic_instr *discard)
90 {
91 /* We require the discard to be in the top level of control flow. We
92 * could, in theory, move discards that are inside ifs or loops but that
93 * would be a lot more work.
94 */
95 if (discard->instr.block->cf_node.parent->type != nir_cf_node_function)
96 return false;
97
98 /* Build the set of all instructions discard depends on to be able to
99 * clear the flags in case the discard cannot be moved.
100 */
101 nir_instr_worklist *work = nir_instr_worklist_create();
102 if (!work)
103 return false;
104 discard->instr.pass_flags = MOVE_INSTR_FLAG;
105
106 bool can_move_discard = can_move_src(&discard->src[0], work);
107 if (!can_move_discard) {
108 /* Moving the discard is impossible: clear the flags */
109 discard->instr.pass_flags = 0;
110 nir_foreach_instr_in_worklist(instr, work)
111 instr->pass_flags = 0;
112 }
113
114 nir_instr_worklist_destroy(work);
115
116 return can_move_discard;
117 }
118
/** Run the move-discards-to-top optimization on one function impl.
 *
 * Pass 1 walks every instruction in order, tagging movable discard/demote
 * dependency chains (via try_move_discard) and stopping at the first
 * instruction that discards must never be hoisted across (calls, writes to
 * external memory, cross-invocation subgroup ops, returns). Pass 2 then
 * moves each tagged instruction, in its original order, to a cursor at the
 * start of the impl.
 *
 * Returns true if any instruction was actually moved.
 */
static bool
opt_move_discards_to_top_impl(nir_function_impl *impl)
{
   bool progress = false;
   /* Once a derivative-using op has been seen, a terminate may no longer be
    * hoisted above it (it would kill the helper invocations the derivative
    * needs); demotes remain movable.
    */
   bool consider_discards = true;
   bool moved = false;

   /* Walk through the instructions and look for a discard that we can move
    * to the top of the program. If we hit any operation along the way that
    * we cannot safely move a discard above, break out of the loop and stop
    * trying to move any more discards.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Reset pass_flags: pass 2 below relies on it being 0 for any
          * instruction that is neither tagged for moving nor a stop marker.
          */
         instr->pass_flags = 0;

         switch (instr->type) {
         case nir_instr_type_alu: {
            nir_alu_instr *alu = nir_instr_as_alu(instr);
            if (nir_op_is_derivative(alu->op))
               consider_discards = false;
            continue;
         }

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
         case nir_instr_type_undef:
         case nir_instr_type_phi:
         case nir_instr_type_debug_info:
            /* These are all safe */
            continue;

         case nir_instr_type_call:
            instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
            /* We don't know what the function will do */
            goto break_all;

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if (nir_texop_implies_derivative(tex->op))
               consider_discards = false;
            continue;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            /* Hoisting a discard above a write another invocation can
             * observe would change what that invocation sees.
             */
            if (nir_intrinsic_writes_external_memory(intrin)) {
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            }
            switch (intrin->intrinsic) {
            /* Quad ops read neighboring invocations: discards may no longer
             * be hoisted above them, but demotes still may (helper
             * invocations stay live).
             */
            case nir_intrinsic_quad_broadcast:
            case nir_intrinsic_quad_swap_horizontal:
            case nir_intrinsic_quad_swap_vertical:
            case nir_intrinsic_quad_swap_diagonal:
            case nir_intrinsic_quad_vote_all:
            case nir_intrinsic_quad_vote_any:
            case nir_intrinsic_quad_swizzle_amd:
               consider_discards = false;
               break;
            /* Wider cross-invocation ops observe which invocations are
             * active, so nothing at all may be hoisted past them.
             */
            case nir_intrinsic_vote_any:
            case nir_intrinsic_vote_all:
            case nir_intrinsic_vote_feq:
            case nir_intrinsic_vote_ieq:
            case nir_intrinsic_ballot:
            case nir_intrinsic_first_invocation:
            case nir_intrinsic_read_invocation:
            case nir_intrinsic_read_first_invocation:
            case nir_intrinsic_elect:
            case nir_intrinsic_reduce:
            case nir_intrinsic_inclusive_scan:
            case nir_intrinsic_exclusive_scan:
            case nir_intrinsic_shuffle:
            case nir_intrinsic_shuffle_xor:
            case nir_intrinsic_shuffle_up:
            case nir_intrinsic_shuffle_down:
            case nir_intrinsic_rotate:
            case nir_intrinsic_masked_swizzle_amd:
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            case nir_intrinsic_terminate_if:
               if (!consider_discards) {
                  /* assume that a shader either uses terminate or demote, but not both */
                  instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
                  goto break_all;
               }
               FALLTHROUGH;
            case nir_intrinsic_demote_if:
               /* NOTE(review): '||' short-circuits, so once one discard has
                * been tagged, try_move_discard is not attempted for any
                * later discard in this impl — confirm that is intentional
                * rather than 'moved |= try_move_discard(intrin)'.
                */
               moved = moved || try_move_discard(intrin);
               break;
            default:
               break;
            }
            continue;
         }

         case nir_instr_type_jump: {
            nir_jump_instr *jump = nir_instr_as_jump(instr);
            /* A return would cause the discard to not get executed */
            if (jump->type == nir_jump_return) {
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            }
            continue;
         }

         case nir_instr_type_parallel_copy:
            unreachable("Unhanded instruction type");
         }
      }
   }
break_all:

   if (moved) {
      /* Walk the list of instructions and move the discard/demote and
       * everything it depends on to the top. We walk the instruction list
       * here because it ensures that everything stays in its original order.
       * This provides stability for the algorithm and ensures that we don't
       * accidentally get dependencies out-of-order.
       */
      nir_cursor cursor = nir_before_impl(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            /* Pass 1 stopped here; nothing past this point was tagged. */
            if (instr->pass_flags == STOP_PROCESSING_INSTR_FLAG)
               return progress;
            if (instr->pass_flags == MOVE_INSTR_FLAG) {
               progress |= nir_instr_move(cursor, instr);
               /* Keep moved instructions in their original relative order. */
               cursor = nir_after_instr(instr);
            }
         }
      }
   }

   return progress;
}
254
/* This optimization only operates on discard_if/demote_if so
 * nir_opt_conditional_discard and nir_lower_discard_or_demote
 * should have been called before.
 */
259 bool
nir_opt_move_discards_to_top(nir_shader * shader)260 nir_opt_move_discards_to_top(nir_shader *shader)
261 {
262 assert(shader->info.stage == MESA_SHADER_FRAGMENT);
263
264 bool progress = false;
265
266 if (!shader->info.fs.uses_discard)
267 return false;
268
269 nir_foreach_function_impl(impl, shader) {
270 if (opt_move_discards_to_top_impl(impl)) {
271 nir_metadata_preserve(impl, nir_metadata_control_flow);
272 progress = true;
273 }
274 }
275
276 return progress;
277 }
278