/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/spirv/nir_spirv.h"
#include "nir/nir.h"
#include "rogue.h"
#include "util/macros.h"

#include <stdbool.h>

/**
 * \file rogue_nir.c
 *
 * \brief Contains SPIR-V and NIR-specific functions.
 */

/**
 * \brief SPIR-V to NIR compilation options.
 */
static const struct spirv_to_nir_options spirv_options = {
   .environment = NIR_SPIRV_VULKAN,

   /* Buffer address: (descriptor_set, binding), offset. */
   .ubo_addr_format = nir_address_format_64bit_global,
};
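
/**
 * \brief NIR compiler options; fuse_ffma32 lets fmul+fadd pairs be fused
 * into single ffma32 instructions during algebraic optimization.
 */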
static const nir_shader_compiler_options nir_options = {
   .fuse_ffma32 = true,
};
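
/**
 * \brief Returns the number of I/O slots occupied by a variable of the given
 * GLSL type (type-size callback for nir_lower_io); the bindless flag is
 * unused.
 */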
static int rogue_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

/**
 * \brief Applies optimizations and passes required to lower the NIR shader
 * into a form suitable for lowering to Rogue IR.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] nir NIR shader.
 * \param[in] stage Shader stage.
 */
static void rogue_nir_passes(struct rogue_build_ctx *ctx,
                             nir_shader *nir,
                             gl_shader_stage stage)
{
   bool progress;
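
   /* Override the per-stage NIR debug-print setting with
    * ROGUE_DEBUG(NIR_PASSES) while running the passes below; the previous
    * value is restored at the end of this function.
    */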
#if !defined(NDEBUG)
   bool nir_debug_print_shader_prev = nir_debug_print_shader[nir->info.stage];
   nir_debug_print_shader[nir->info.stage] = ROGUE_DEBUG(NIR_PASSES);
#endif /* !defined(NDEBUG) */

   nir_validate_shader(nir, "after spirv_to_nir");

   NIR_PASS_V(nir, nir_lower_vars_to_ssa);

   /* Splitting. */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   /* Replace references to I/O variables with intrinsics. */
   NIR_PASS_V(nir,
              nir_lower_io,
              nir_var_shader_in | nir_var_shader_out,
              rogue_glsl_type_size,
              (nir_lower_io_options)0);

   /* Load inputs to scalars (single registers later). */
   /* TODO: Fitrp can process multiple frag inputs at once, scalarise I/O. */
   NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_shader_in, NULL, NULL);

   /* Optimize GL access qualifiers. */
   const nir_opt_access_options opt_access_options = {
      .is_vulkan = true,
   };
   NIR_PASS_V(nir, nir_opt_access, &opt_access_options);

   /* Apply PFO code to the fragment shader output. */
   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, rogue_nir_pfo);

   /* Load outputs to scalars (single registers later). */
   NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_shader_out, NULL, NULL);

   /* Lower ALU operations to scalars. */
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);

   /* Lower load_consts to scalars. */
   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);

   /* Additional I/O lowering: lower UBO accesses to explicit addressing (see
    * spirv_options.ubo_addr_format), then scalarise them and apply
    * Rogue-specific I/O lowering.
    */
   NIR_PASS_V(nir,
              nir_lower_explicit_io,
              nir_var_mem_ubo,
              spirv_options.ubo_addr_format);
   NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_mem_ubo, NULL, NULL);
   NIR_PASS_V(nir, rogue_nir_lower_io);

   /* Algebraic opts. */
   do {
      progress = false;

      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_dce);
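      /* GCM runs via NIR_PASS_V, so it does not feed the progress flag that
       * controls this loop.
       */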
      NIR_PASS_V(nir, nir_opt_gcm, false);
   } while (progress);

   /* Late algebraic opts. */
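   /* Only nir_opt_algebraic_late contributes to the progress flag; the
    * clean-up passes below run unconditionally each iteration.
    */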
   do {
      progress = false;

      NIR_PASS(progress, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   } while (progress);

   /* Remove unused constant registers. */
   NIR_PASS_V(nir, nir_opt_dce);

   /* Move loads to just before they're needed. */
   /* Disabled for now, since we want to try to keep them vectorised and
    * grouped.
    */
   /* TODO: Investigate this further. */
   /* NIR_PASS_V(nir, nir_opt_move, nir_move_load_ubo | nir_move_load_input); */

   /* TODO: Re-enable scheduling after register pressure tweaks. */
#if 0
   /* Instruction scheduling. */
   struct nir_schedule_options schedule_options = {
      .threshold = ROGUE_MAX_REG_TEMP / 2,
   };
   NIR_PASS_V(nir, nir_schedule, &schedule_options);
#endif

   /* Assign I/O locations. */
   nir_assign_io_var_locations(nir,
                               nir_var_shader_in,
                               &nir->num_inputs,
                               nir->info.stage);
   nir_assign_io_var_locations(nir,
                               nir_var_shader_out,
                               &nir->num_outputs,
                               nir->info.stage);

   /* Renumber SSA defs. */
   nir_index_ssa_defs(nir_shader_get_entrypoint(nir));

   /* Gather info into nir shader struct. */
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Clean up after the passes; reclaims memory no longer referenced by the
    * shader.
    */
   nir_sweep(nir);

   nir_validate_shader(nir, "after passes");
   if (ROGUE_DEBUG(NIR)) {
      fputs("after passes\n", stdout);
      nir_print_shader(nir, stdout);
   }

#if !defined(NDEBUG)
   nir_debug_print_shader[nir->info.stage] = nir_debug_print_shader_prev;
#endif /* !defined(NDEBUG) */
}

/**
 * \brief Converts a SPIR-V shader to NIR.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] stage Shader stage.
 * \param[in] entry Shader entry-point function name.
 * \param[in] spirv_size SPIR-V data length in 32-bit words (DWORDs).
 * \param[in] spirv_data SPIR-V data.
 * \param[in] num_spec Number of SPIR-V specializations.
 * \param[in] spec SPIR-V specializations.
 * \return A nir_shader* if successful, or NULL if unsuccessful.
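 *
 * Example call (illustrative only; the variable names are hypothetical):
 * \code
 *    nir_shader *nir = rogue_spirv_to_nir(ctx,
 *                                         MESA_SHADER_FRAGMENT,
 *                                         "main",
 *                                         spirv_word_count,
 *                                         spirv_words,
 *                                         0,
 *                                         NULL);
 * \endcode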
 */
PUBLIC
nir_shader *rogue_spirv_to_nir(rogue_build_ctx *ctx,
                               gl_shader_stage stage,
                               const char *entry,
                               unsigned spirv_size,
                               const uint32_t *spirv_data,
                               unsigned num_spec,
                               struct nir_spirv_specialization *spec)
{
   nir_shader *nir;

   nir = spirv_to_nir(spirv_data,
                      spirv_size,
                      spec,
                      num_spec,
                      stage,
                      entry,
                      &spirv_options,
                      &nir_options);
   if (!nir)
      return NULL;
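
   /* Make the build context the ralloc parent of the NIR shader so their
    * lifetimes are tied together.
    */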
   ralloc_steal(ctx, nir);

   /* Apply passes. */
   rogue_nir_passes(ctx, nir, stage);

   /* Collect I/O data to pass back to the driver. */
   rogue_collect_io_data(ctx, nir);

   return nir;
}