/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "spirv_to_dxil.h"
#include "nir_to_dxil.h"
#include "dxil_nir.h"
#include "dxil_nir_lower_int_cubemaps.h"
#include "shader_enums.h"
#include "spirv/nir_spirv.h"
#include "spirv/spirv_info.h"
#include "util/blob.h"
#include "dxil_spirv_nir.h"

#include "git_sha1.h"
#include "vulkan/vulkan.h"

static const struct spirv_capabilities
spirv_caps = {
   .Shader = true,
   .Geometry = true,
   .DrawParameters = true,
   .MultiView = true,
   .GroupNonUniform = true,
   .GroupNonUniformBallot = true,
   .GroupNonUniformVote = true,
   .GroupNonUniformShuffle = true,
   .GroupNonUniformQuad = true,
   .GroupNonUniformArithmetic = true,
   .InputAttachmentArrayDynamicIndexingEXT = true,
   .UniformTexelBufferArrayDynamicIndexingEXT = true,
   .StorageTexelBufferArrayDynamicIndexingEXT = true,
   .DenormFlushToZero = true,
   .DenormPreserve = true,
   .SignedZeroInfNanPreserve = true,
   .RoundingModeRTE = true,
   .RoundingModeRTZ = true,
   .Float16 = true,
   .Int16 = true,
   .StorageBuffer8BitAccess = true,
   .UniformAndStorageBuffer8BitAccess = true,
   .StoragePushConstant8 = true,
   .StorageUniformBufferBlock16 = true,
   .StorageUniform16 = true,
   .StoragePushConstant16 = true,
   .StorageInputOutput16 = true,
   .ShaderNonUniformEXT = true,
   .RuntimeDescriptorArray = true,
   .UniformBufferArrayNonUniformIndexingEXT = true,
   .SampledImageArrayNonUniformIndexingEXT = true,
   .StorageBufferArrayNonUniformIndexingEXT = true,
   .StorageImageArrayNonUniformIndexingEXT = true,
   .InputAttachmentArrayNonUniformIndexingEXT = true,
   .UniformTexelBufferArrayNonUniformIndexingEXT = true,
   .StorageTexelBufferArrayNonUniformIndexingEXT = true,
   .StorageImageReadWithoutFormat = true,
   .StorageImageWriteWithoutFormat = true,
   .ImageQuery = true,
   .Int64 = true,
   .Float64 = true,
   .Tessellation = true,
   .PhysicalStorageBufferAddresses = true,
};

static const struct spirv_to_nir_options
spirv_to_nir_options = {
   .capabilities = &spirv_caps,
   .ubo_addr_format = nir_address_format_32bit_index_offset,
   .ssbo_addr_format = nir_address_format_32bit_index_offset,
   .shared_addr_format = nir_address_format_logical,
   .phys_ssbo_addr_format = nir_address_format_32bit_index_offset_pack64,

   .min_ubo_alignment = 256, /* D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT */
   .min_ssbo_alignment = 16, /* D3D12_RAW_UAV_SRV_BYTE_ALIGNMENT */

   .mediump_16bit_alu = true,
   .mediump_16bit_derivatives = true,
};

const struct spirv_to_nir_options*
dxil_spirv_nir_get_spirv_options(void)
{
   return &spirv_to_nir_options;
}

/* Logic extracted from vk_spirv_to_nir() so we have the same preparation
 * steps for both the vulkan driver and the lib used by the WebGPU
 * implementation.
 * Maybe we should move those steps out of vk_spirv_to_nir() and make
 * them vk agnostic (right now, the only vk-specific thing is the vk_device
 * object that's used for the debug callback passed to spirv_to_nir()).
 */
void
dxil_spirv_nir_prep(nir_shader *nir)
{
   /* We have to lower away local constant initializers right before we
    * inline functions. That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   nir_remove_non_entrypoints(nir);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers. We do this here so that
    * nir_remove_dead_variables and split_per_member_structs below see the
    * corresponding stores.
    */
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value |
              nir_var_shader_call_data | nir_var_ray_hit_attrib,
              NULL);

   NIR_PASS_V(nir, nir_propagate_invariant, false);
}

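/* glsl_type size/align callback used when assigning an explicit layout to
 * shared-memory variables: scalars and vectors are packed with natural
 * component alignment, and booleans are promoted to 32 bits.
 */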
static void
shared_var_info(const struct glsl_type* type, unsigned* size, unsigned* align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}

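/* Recursive size/align callback for temp variables: arrays scale the element
 * size by the array length, structs accumulate aligned field sizes, and
 * everything else uses its natural size with at least 4-byte alignment.
 */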
static void
temp_var_info(const struct glsl_type* type, unsigned* size, unsigned* align)
{
   uint32_t base_size, base_align;
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_ARRAY:
      temp_var_info(glsl_get_array_element(type), &base_size, align);
      *size = base_size * glsl_array_size(type);
      break;
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE:
      *size = 0;
      *align = 0;
      for (uint32_t i = 0; i < glsl_get_length(type); ++i) {
         temp_var_info(glsl_get_struct_field(type, i), &base_size, &base_align);
         *size = ALIGN_POT(*size, base_align) + base_size;
         *align = MAX2(*align, base_align);
      }
      break;
   default:
      glsl_get_natural_size_align_bytes(type, &base_size, &base_align);

      *align = MAX2(base_align, 4);
      *size = ALIGN_POT(base_size, *align);
      break;
   }
}

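/* Declares the hidden runtime-data CBV (a uint array wrapped in a struct) at
 * the given descriptor set/binding. Its size depends on whether this is a
 * compute or a vertex-pipeline shader.
 */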
static nir_variable *
add_runtime_data_var(nir_shader *nir, unsigned desc_set, unsigned binding)
{
   unsigned runtime_data_size =
      nir->info.stage == MESA_SHADER_COMPUTE
         ? sizeof(struct dxil_spirv_compute_runtime_data)
         : sizeof(struct dxil_spirv_vertex_runtime_data);

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), runtime_data_size / sizeof(unsigned),
                      sizeof(unsigned));
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "runtime_data", false), "runtime_data");
   var->data.descriptor_set = desc_set;
   // Check that desc_set fits in the descriptor_set field
   assert(var->data.descriptor_set == desc_set);
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}

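/* Replaces loads of system values that have no native D3D12 equivalent here
 * (workgroup count, base workgroup id, first vertex, base instance, draw id,
 * is-indexed-draw, view index) with UBO loads from the runtime-data CBV, at
 * the offsets defined by dxil_spirv_{compute,vertex}_runtime_data.
 */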
static bool
lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
                           void *cb_data)
{
   if (instr->type != nir_instr_type_intrinsic) {
      return false;
   }

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* All the intrinsics we care about are loads */
   if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
      return false;

   const struct dxil_spirv_runtime_conf *conf =
      (const struct dxil_spirv_runtime_conf *)cb_data;

   int offset = 0;
   switch (intrin->intrinsic) {
   case nir_intrinsic_load_num_workgroups:
      offset =
         offsetof(struct dxil_spirv_compute_runtime_data, group_count_x);
      break;
   case nir_intrinsic_load_base_workgroup_id:
      offset =
         offsetof(struct dxil_spirv_compute_runtime_data, base_group_x);
      break;
   case nir_intrinsic_load_first_vertex:
      if (conf->first_vertex_and_base_instance_mode == DXIL_SPIRV_SYSVAL_TYPE_NATIVE)
         return false;
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, first_vertex);
      break;
   case nir_intrinsic_load_is_indexed_draw:
      offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, is_indexed_draw);
      break;
   case nir_intrinsic_load_base_instance:
      if (conf->first_vertex_and_base_instance_mode == DXIL_SPIRV_SYSVAL_TYPE_NATIVE)
         return false;
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, base_instance);
      break;
   case nir_intrinsic_load_draw_id:
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, draw_id);
      break;
   case nir_intrinsic_load_view_index:
      if (!conf->lower_view_index)
         return false;
      offset = offsetof(struct dxil_spirv_vertex_runtime_data, view_index);
      break;
   default:
      return false;
   }

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = nir_address_format_32bit_index_offset;

   nir_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = conf->runtime_data_cbv.register_space,
      .binding = conf->runtime_data_cbv.base_shader_register,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_data = nir_load_ubo(
      builder,
      intrin->def.num_components,
      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_imm_int(builder, offset),
      .align_mul = 256,
      .align_offset = offset,
      .range_base = offset,
      .range = intrin->def.bit_size * intrin->def.num_components / 8);

   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);
   return true;
}

static bool
dxil_spirv_nir_lower_shader_system_values(nir_shader *shader,
                                          const struct dxil_spirv_runtime_conf *conf)
{
   return nir_shader_instructions_pass(shader, lower_shader_system_values,
                                       nir_metadata_control_flow |
                                       nir_metadata_loop_analysis,
                                       (void *)conf);
}

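/* Push constants are exposed as another hidden CBV holding an array of
 * 32-bit words at the configured register space/register.
 */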
static nir_variable *
add_push_constant_var(nir_shader *nir, unsigned size, unsigned desc_set, unsigned binding)
{
   /* Size must be a multiple of 16 as buffer load is loading 16 bytes at a time */
   unsigned num_32bit_words = ALIGN_POT(size, 16) / 4;

   const struct glsl_type *array_type =
      glsl_array_type(glsl_uint_type(), num_32bit_words, 4);
   const struct glsl_struct_field field = {array_type, "arr"};
   nir_variable *var = nir_variable_create(
      nir, nir_var_mem_ubo,
      glsl_struct_type(&field, 1, "block", false), "push_constants");
   var->data.descriptor_set = desc_set;
   var->data.binding = binding;
   var->data.how_declared = nir_var_hidden;
   return var;
}

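/* Rewrites load_push_constant intrinsics into UBO loads from the push-constant
 * CBV, and records the highest offset seen so the caller knows how large that
 * CBV needs to be.
 */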
struct lower_load_push_constant_data {
   nir_address_format ubo_format;
   unsigned desc_set;
   unsigned binding;
   unsigned size;
};

static bool
lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
                         void *cb_data)
{
   struct lower_load_push_constant_data *data =
      (struct lower_load_push_constant_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* The only intrinsic we care about is load_push_constant */
   if (intrin->intrinsic != nir_intrinsic_load_push_constant)
      return false;

   uint32_t base = nir_intrinsic_base(intrin);
   uint32_t range = nir_intrinsic_range(intrin);

   data->size = MAX2(base + range, data->size);

   builder->cursor = nir_after_instr(instr);
   nir_address_format ubo_format = data->ubo_format;

   nir_def *index = nir_vulkan_resource_index(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(builder, 0),
      .desc_set = data->desc_set, .binding = data->binding,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      builder, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *offset = intrin->src[0].ssa;
   nir_def *load_data = nir_load_ubo(
      builder,
      intrin->def.num_components,
      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_iadd_imm(builder, offset, base),
      .align_mul = nir_intrinsic_align_mul(intrin),
      .align_offset = nir_intrinsic_align_offset(intrin),
      .range_base = base,
      .range = range);

   nir_def_replace(&intrin->def, load_data);
   return true;
}

static bool
dxil_spirv_nir_lower_load_push_constant(nir_shader *shader,
                                        nir_address_format ubo_format,
                                        unsigned desc_set, unsigned binding,
                                        uint32_t *size)
{
   bool ret;
   struct lower_load_push_constant_data data = {
      .ubo_format = ubo_format,
      .desc_set = desc_set,
      .binding = binding,
   };
   ret = nir_shader_instructions_pass(shader, lower_load_push_constant,
                                      nir_metadata_control_flow |
                                      nir_metadata_loop_analysis,
                                      &data);

   *size = data.size;

   assert(ret == (*size > 0));

   return ret;
}

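/* Vulkan and D3D12 disagree on the Y direction and Z range of clip space, so
 * stores to gl_Position may need a Y and/or Z flip. The flip can be
 * unconditional (mask baked into the shader) or conditional (mask read from
 * the vertex runtime-data UBO at draw time).
 */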
struct lower_yz_flip_data {
   bool *reads_sysval_ubo;
   const struct dxil_spirv_runtime_conf *rt_conf;
};

static bool
lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
              void *cb_data)
{
   struct lower_yz_flip_data *data =
      (struct lower_yz_flip_data *)cb_data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_POS)
      return false;

   builder->cursor = nir_before_instr(instr);

   const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;

   nir_def *pos = intrin->src[1].ssa;
   nir_def *y_pos = nir_channel(builder, pos, 1);
   nir_def *z_pos = nir_channel(builder, pos, 2);
   nir_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_YZ_FLIP_CONDITIONAL) {
      // conditional YZ-flip. The flip bitmask is passed through the vertex
      // runtime data UBO.
      unsigned offset =
         offsetof(struct dxil_spirv_vertex_runtime_data, yz_flip_mask);
      nir_address_format ubo_format = nir_address_format_32bit_index_offset;

      nir_def *index = nir_vulkan_resource_index(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         nir_imm_int(builder, 0),
         .desc_set = rt_conf->runtime_data_cbv.register_space,
         .binding = rt_conf->runtime_data_cbv.base_shader_register,
         .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      nir_def *load_desc = nir_load_vulkan_descriptor(
         builder, nir_address_format_num_components(ubo_format),
         nir_address_format_bit_size(ubo_format),
         index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      dyn_yz_flip_mask =
         nir_load_ubo(builder, 1, 32,
                      nir_channel(builder, load_desc, 0),
                      nir_imm_int(builder, offset),
                      .align_mul = 256,
                      .align_offset = offset,
                      .range_base = offset,
                      .range = 4);
      *data->reads_sysval_ubo = true;
   }

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_UNCONDITIONAL)
      y_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.y_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Y_FLIP_CONDITIONAL)
      y_flip_mask = nir_iand_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Y_FLIP_MASK);

   if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_UNCONDITIONAL)
      z_flip_mask = nir_imm_int(builder, rt_conf->yz_flip.z_mask);
   else if (rt_conf->yz_flip.mode & DXIL_SPIRV_Z_FLIP_CONDITIONAL)
      z_flip_mask = nir_ushr_imm(builder, dyn_yz_flip_mask, DXIL_SPIRV_Z_FLIP_SHIFT);

   /* TODO: Multi-viewport */

   if (y_flip_mask) {
      nir_def *flip = nir_test_mask(builder, y_flip_mask, 1);

      // Y-flip => pos.y = -pos.y
      y_pos = nir_bcsel(builder, flip, nir_fneg(builder, y_pos), y_pos);
   }

   if (z_flip_mask) {
      nir_def *flip = nir_test_mask(builder, z_flip_mask, 1);

      // Z-flip => pos.z = -pos.z + 1.0f
      z_pos = nir_bcsel(builder, flip,
                        nir_fadd_imm(builder, nir_fneg(builder, z_pos), 1.0f),
                        z_pos);
   }

   nir_def *def = nir_vec4(builder,
                           nir_channel(builder, pos, 0),
                           y_pos,
                           z_pos,
                           nir_channel(builder, pos, 3));
   nir_src_rewrite(&intrin->src[1], def);
   return true;
}

bool
dxil_spirv_nir_lower_yz_flip(nir_shader *shader,
                             const struct dxil_spirv_runtime_conf *rt_conf,
                             bool *reads_sysval_ubo)
{
   struct lower_yz_flip_data data = {
      .rt_conf = rt_conf,
      .reads_sysval_ubo = reads_sysval_ubo,
   };

   return nir_shader_instructions_pass(shader, lower_yz_flip,
                                       nir_metadata_control_flow |
                                       nir_metadata_loop_analysis,
                                       &data);
}

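/* DXIL has no equivalent of gl_PointSize, so stores to it are dropped and
 * loads are replaced with 1.0.
 */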
static bool
discard_psiz_access(struct nir_builder *builder, nir_intrinsic_instr *intrin,
                    void *cb_data)
{
   if (intrin->intrinsic != nir_intrinsic_store_deref &&
       intrin->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intrin, 0);
   if (!var || var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_PSIZ)
      return false;

   builder->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_load_deref)
      nir_def_rewrite_uses(&intrin->def, nir_imm_float(builder, 1.0));

   nir_instr_remove(&intrin->instr);
   return true;
}

static bool
dxil_spirv_nir_discard_point_size_var(nir_shader *shader)
{
   if (shader->info.stage != MESA_SHADER_VERTEX &&
       shader->info.stage != MESA_SHADER_TESS_EVAL &&
       shader->info.stage != MESA_SHADER_GEOMETRY)
      return false;

   nir_variable *psiz = NULL;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_PSIZ) {
         psiz = var;
         break;
      }
   }

   if (!psiz)
      return false;

   if (!nir_shader_intrinsics_pass(shader, discard_psiz_access,
                                   nir_metadata_control_flow |
                                   nir_metadata_loop_analysis,
                                   NULL))
      return false;

   nir_remove_dead_derefs(shader);
   return true;
}

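/* gl_PointCoord emulation, pre-raster side: when gl_Position is written, also
 * write the point's framebuffer-space center (derived from the position and
 * the viewport size stored in the runtime-data UBO) to a PNTC varying. The
 * fragment shader reconstructs the point coord from gl_FragCoord minus that
 * center (see lower_pntc_read below).
 */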
struct lower_pntc_data {
   const struct dxil_spirv_runtime_conf *conf;
   nir_variable *pntc;
};

static bool
write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
{
   struct lower_pntc_data *data = (struct lower_pntc_data *)_data;
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;
   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.location != VARYING_SLOT_POS)
      return false;

   nir_def *pos = intr->src[1].ssa;

   unsigned offset =
      offsetof(struct dxil_spirv_vertex_runtime_data, viewport_width) - 4;
   static_assert(offsetof(struct dxil_spirv_vertex_runtime_data, viewport_width) % 16 == 4,
                 "Doing vector unpacking with this assumption");
   nir_address_format ubo_format = nir_address_format_32bit_index_offset;

   b->cursor = nir_before_instr(instr);
   nir_def *index = nir_vulkan_resource_index(
      b, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      nir_imm_int(b, 0),
      .desc_set = data->conf->runtime_data_cbv.register_space,
      .binding = data->conf->runtime_data_cbv.base_shader_register,
      .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *load_desc = nir_load_vulkan_descriptor(
      b, nir_address_format_num_components(ubo_format),
      nir_address_format_bit_size(ubo_format),
      index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

   nir_def *transform = nir_channels(b,
                                     nir_load_ubo(b, 4, 32,
                                                  nir_channel(b, load_desc, 0),
                                                  nir_imm_int(b, offset),
                                                  .align_mul = 16,
                                                  .range_base = offset,
                                                  .range = 16),
                                     0x6);
   nir_def *point_center_in_clip = nir_fmul(b, nir_trim_vector(b, pos, 2),
                                            nir_frcp(b, nir_channel(b, pos, 3)));
   nir_def *point_center =
      nir_fmul(b, nir_fadd_imm(b,
                               nir_fmul(b, point_center_in_clip,
                                        nir_vec2(b, nir_imm_float(b, 0.5), nir_imm_float(b, -0.5f))),
                               0.5), transform);
   nir_store_var(b, data->pntc, nir_pad_vec4(b, point_center), 0xf);
   return true;
}

static void
dxil_spirv_write_pntc(nir_shader *nir, const struct dxil_spirv_runtime_conf *conf)
{
   struct lower_pntc_data data = { .conf = conf };
   data.pntc = nir_variable_create(nir, nir_var_shader_out, glsl_vec4_type(), "gl_PointCoord");
   data.pntc->data.location = VARYING_SLOT_PNTC;
   nir_shader_instructions_pass(nir, write_pntc_with_pos,
                                nir_metadata_control_flow |
                                nir_metadata_loop_analysis,
                                &data);
   nir->info.outputs_written |= VARYING_BIT_PNTC;

   /* Add the runtime data var if it's not already there */
   nir_binding binding = {
      .binding = conf->runtime_data_cbv.base_shader_register,
      .desc_set = conf->runtime_data_cbv.register_space,
      .success = true,
   };
   nir_variable *ubo_var = nir_get_binding_variable(nir, binding);
   if (!ubo_var)
      add_runtime_data_var(nir, conf->runtime_data_cbv.register_space, conf->runtime_data_cbv.base_shader_register);
}

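/* Fragment-shader side of the gl_PointCoord emulation: loads of the PNTC
 * varying are rewritten as gl_FragCoord.xy minus the interpolated point
 * center, plus 0.5.
 */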
static bool
lower_pntc_read(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
   if (intr->intrinsic != nir_intrinsic_load_deref)
      return false;
   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var || var->data.location != VARYING_SLOT_PNTC)
      return false;

   nir_def *point_center = &intr->def;
   nir_variable *pos_var = (nir_variable *)data;

   b->cursor = nir_after_instr(&intr->instr);

   nir_def *pos;
   if (var->data.sample == pos_var->data.sample)
      pos = nir_load_var(b, pos_var);
   else if (var->data.sample)
      pos = nir_interp_deref_at_sample(b, 4, 32,
                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_load_sample_id(b));
   else
      pos = nir_interp_deref_at_offset(b, 4, 32,
                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_imm_zero(b, 2, 32));

   nir_def *pntc = nir_fadd_imm(b,
                                nir_fsub(b, nir_trim_vector(b, pos, 2), nir_trim_vector(b, point_center, 2)),
                                0.5);
   nir_def_rewrite_uses_after(point_center, pntc, pntc->parent_instr);
   return true;
}

static void
dxil_spirv_compute_pntc(nir_shader *nir)
{
   nir_variable *pos = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_POS);
   if (!pos) {
      pos = nir_variable_create(nir, nir_var_shader_in, glsl_vec4_type(), "gl_FragCoord");
      pos->data.location = VARYING_SLOT_POS;
      pos->data.sample = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_PNTC)->data.sample;
   }
   nir_shader_intrinsics_pass(nir, lower_pntc_read,
                              nir_metadata_control_flow |
                              nir_metadata_loop_analysis,
                              pos);
}

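/* When view index is lowered to the render-target layer, existing gl_Layer
 * writes get the view index added to them; stages that never wrote gl_Layer
 * get a write of the view index appended (per emitted vertex for geometry
 * shaders, at the end of the entrypoint otherwise).
 */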
static bool
lower_view_index_to_rt_layer_instr(nir_builder *b, nir_intrinsic_instr *intr,
                                   void *data)
{
   if (intr->intrinsic != nir_intrinsic_store_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (!var ||
       var->data.mode != nir_var_shader_out ||
       var->data.location != VARYING_SLOT_LAYER)
      return false;

   b->cursor = nir_before_instr(&intr->instr);
   nir_def *layer = intr->src[1].ssa;
   nir_def *new_layer = nir_iadd(b, layer,
                                 nir_load_view_index(b));
   nir_src_rewrite(&intr->src[1], new_layer);
   return true;
}

static bool
add_layer_write(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr) {
      if (instr->type != nir_instr_type_intrinsic)
         return false;
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (intr->intrinsic != nir_intrinsic_emit_vertex &&
          intr->intrinsic != nir_intrinsic_emit_vertex_with_counter)
         return false;
      b->cursor = nir_before_instr(instr);
   }
   nir_variable *var = (nir_variable *)data;
   nir_store_var(b, var, nir_load_view_index(b), 0x1);
   return true;
}

static void
lower_view_index_to_rt_layer(nir_shader *nir)
{
   bool existing_write =
      nir_shader_intrinsics_pass(nir, lower_view_index_to_rt_layer_instr,
                                 nir_metadata_control_flow |
                                 nir_metadata_loop_analysis, NULL);

   if (existing_write)
      return;

   nir_variable *var = nir_variable_create(nir, nir_var_shader_out,
                                           glsl_uint_type(), "gl_Layer");
   var->data.location = VARYING_SLOT_LAYER;
   var->data.interpolation = INTERP_MODE_FLAT;
   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      nir_shader_instructions_pass(nir,
                                   add_layer_write,
                                   nir_metadata_control_flow |
                                   nir_metadata_loop_analysis, var);
   } else {
      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_builder b = nir_builder_at(nir_after_impl(func));
      add_layer_write(&b, NULL, var);
   }
}

void
dxil_spirv_nir_link(nir_shader *nir, nir_shader *prev_stage_nir,
                    const struct dxil_spirv_runtime_conf *conf,
                    struct dxil_spirv_metadata *metadata)
{
   glsl_type_singleton_init_or_ref();

   metadata->requires_runtime_data = false;
   if (prev_stage_nir) {
      if (nir->info.stage == MESA_SHADER_FRAGMENT) {
         nir->info.clip_distance_array_size = prev_stage_nir->info.clip_distance_array_size;

         if (nir->info.inputs_read & VARYING_BIT_PNTC) {
            NIR_PASS_V(prev_stage_nir, dxil_spirv_write_pntc, conf);
            NIR_PASS_V(nir, dxil_spirv_compute_pntc);
            metadata->requires_runtime_data = true;
         }
      }

      NIR_PASS_V(nir, dxil_nir_kill_undefined_varyings, prev_stage_nir->info.outputs_written, prev_stage_nir->info.patch_outputs_written, NULL);
      NIR_PASS_V(prev_stage_nir, dxil_nir_kill_unused_outputs, nir->info.inputs_read, nir->info.patch_inputs_read, NULL);

      dxil_reassign_driver_locations(nir, nir_var_shader_in, prev_stage_nir->info.outputs_written, NULL);
      dxil_reassign_driver_locations(prev_stage_nir, nir_var_shader_out, nir->info.inputs_read, NULL);

      if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
         assert(prev_stage_nir->info.stage == MESA_SHADER_TESS_CTRL);
         nir->info.tess.tcs_vertices_out = prev_stage_nir->info.tess.tcs_vertices_out;
         prev_stage_nir->info.tess = nir->info.tess;

         for (uint32_t i = 0; i < 2; ++i) {
            unsigned loc = i == 0 ? VARYING_SLOT_TESS_LEVEL_OUTER : VARYING_SLOT_TESS_LEVEL_INNER;
            nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_in, loc);
            if (!var) {
               var = nir_variable_create(nir, nir_var_shader_in, glsl_array_type(glsl_float_type(), i == 0 ? 4 : 2, 0), i == 0 ? "outer" : "inner");
               var->data.location = loc;
               var->data.patch = true;
               var->data.compact = true;
            }
         }
      }
   }

   glsl_type_singleton_decref();
}

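/* Subgroup reductions/scans and quad swaps on 1-bit (boolean) values are
 * promoted to 32 bits via nir_lower_bit_size.
 */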
static unsigned
lower_bit_size_callback(const nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return 0;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   switch (intr->intrinsic) {
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
      return intr->def.bit_size == 1 ? 32 : 0;
   default:
      return 0;
   }
}

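/* If several UBO/SSBO variables alias the same descriptor set/binding, merge
 * them into a single variable, keeping the largest array size and (for UBOs)
 * the largest struct type so one declaration covers every aliased access.
 */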
static bool
merge_ubos_and_ssbos(nir_shader *nir)
{
   bool progress = false;
   nir_foreach_variable_with_modes_safe(var, nir, nir_var_mem_ubo | nir_var_mem_ssbo) {
      nir_variable *other_var = NULL;
      nir_foreach_variable_with_modes(var2, nir, var->data.mode) {
         if (var->data.descriptor_set == var2->data.descriptor_set &&
             var->data.binding == var2->data.binding) {
            other_var = var2;
            break;
         }
      }

      if (!other_var)
         continue;

      progress = true;
      /* Merge types */
      if (var->type != other_var->type) {
         /* Pick the larger array size */
         uint32_t desc_array_size = 1;
         if (glsl_type_is_array(var->type))
            desc_array_size = glsl_get_aoa_size(var->type);
         if (glsl_type_is_array(other_var->type))
            desc_array_size = MAX2(desc_array_size, glsl_get_aoa_size(other_var->type));

         const glsl_type *struct_type = glsl_without_array(var->type);
         if (var->data.mode == nir_var_mem_ubo) {
            /* Pick the larger struct type; doesn't matter for ssbos */
            uint32_t size = glsl_get_explicit_size(struct_type, false);
            const glsl_type *other_type = glsl_without_array(other_var->type);
            if (glsl_get_explicit_size(other_type, false) > size)
               struct_type = other_type;
         }

         var->type = glsl_array_type(struct_type, desc_array_size, 0);

         /* An ssbo is non-writeable if all aliased vars are non-writeable */
         if (var->data.mode == nir_var_mem_ssbo)
            var->data.access &= ~(other_var->data.access & ACCESS_NON_WRITEABLE);

         exec_node_remove(&other_var->node);
      }
   }
   nir_shader_preserve_all_metadata(nir);
   return progress;
}

void
dxil_spirv_nir_passes(nir_shader *nir,
                      const struct dxil_spirv_runtime_conf *conf,
                      struct dxil_spirv_metadata *metadata)
{
   glsl_type_singleton_init_or_ref();

   NIR_PASS_V(nir, nir_lower_io_to_vector,
              nir_var_shader_out |
              (nir->info.stage != MESA_SHADER_VERTEX ? nir_var_shader_in : 0));
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(nir, nir_remove_dead_derefs);

   const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
      .frag_coord = true,
      .point_coord = true,
      .front_face = true,
   };
   NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);

   NIR_PASS_V(nir, nir_lower_system_values);

   nir_lower_compute_system_values_options compute_options = {
      .has_base_workgroup_id = conf->workgroup_id_mode != DXIL_SPIRV_SYSVAL_TYPE_ZERO,
   };
   NIR_PASS_V(nir, nir_lower_compute_system_values, &compute_options);
   NIR_PASS_V(nir, dxil_nir_lower_subgroup_id);
   NIR_PASS_V(nir, dxil_nir_lower_num_subgroups);

   nir_lower_subgroups_options subgroup_options = {
      .ballot_bit_size = 32,
      .ballot_components = 4,
      .lower_subgroup_masks = true,
      .lower_to_scalar = true,
      .lower_relative_shuffle = true,
      .lower_inverse_ballot = true,
   };
   if (nir->info.stage != MESA_SHADER_FRAGMENT &&
       nir->info.stage != MESA_SHADER_COMPUTE)
      subgroup_options.lower_quad = true;
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
   NIR_PASS_V(nir, nir_lower_bit_size, lower_bit_size_callback, NULL);

   // Ensure subgroup scans on bools are gone
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, dxil_nir_lower_unsupported_subgroup_scan);

   // Force sample-rate shading if we're asked to.
   if (conf->force_sample_rate_shading) {
      assert(nir->info.stage == MESA_SHADER_FRAGMENT);
      nir->info.fs.uses_sample_shading = true;
   }

   if (conf->first_vertex_and_base_instance_mode == DXIL_SPIRV_SYSVAL_TYPE_ZERO) {
      // vertex_id and instance_id should have already been transformed to
      // base zero before spirv_to_dxil was called. Therefore, we can zero out
      // base/firstVertex/Instance.
      gl_system_value system_values[] = {SYSTEM_VALUE_FIRST_VERTEX,
                                         SYSTEM_VALUE_BASE_VERTEX,
                                         SYSTEM_VALUE_BASE_INSTANCE};
      NIR_PASS_V(nir, dxil_nir_lower_system_values_to_zero, system_values,
                 ARRAY_SIZE(system_values));
   }

   if (conf->lower_view_index_to_rt_layer)
      NIR_PASS_V(nir, lower_view_index_to_rt_layer);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   metadata->needs_draw_sysvals = BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
                                  BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE);

   NIR_PASS(metadata->requires_runtime_data, nir,
            dxil_spirv_nir_lower_shader_system_values,
            conf);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_input_attachments,
                 &(nir_input_attachment_options){
                     .use_fragcoord_sysval = false,
                     .use_layer_id_sysval = !conf->lower_view_index,
                     .use_view_id_for_layer = !conf->lower_view_index,
                 });

      NIR_PASS_V(nir, dxil_nir_lower_discard_and_terminate);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, dxil_nir_lower_sample_pos);
      NIR_PASS_V(nir, nir_lower_fragcoord_wtrans);
   }

   NIR_PASS_V(nir, nir_opt_deref);

   NIR_PASS_V(nir, nir_lower_memory_model);
   NIR_PASS_V(nir, dxil_nir_lower_coherent_loads_and_stores);

   if (conf->inferred_read_only_images_as_srvs) {
      const nir_opt_access_options opt_access_options = {
         .is_vulkan = true,
      };
      NIR_PASS_V(nir, nir_opt_access, &opt_access_options);
   }

   NIR_PASS_V(nir, dxil_spirv_nir_discard_point_size_var);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out |
              nir_var_system_value | nir_var_mem_shared,
              NULL);

   uint32_t push_constant_size = 0;
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
              nir_address_format_32bit_offset);
   NIR_PASS_V(nir, dxil_spirv_nir_lower_load_push_constant,
              nir_address_format_32bit_index_offset,
              conf->push_constant_cbv.register_space,
              conf->push_constant_cbv.base_shader_register,
              &push_constant_size);

   NIR_PASS_V(nir, dxil_spirv_nir_lower_buffer_device_address);
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
              nir_address_format_32bit_index_offset_pack64);

   if (nir->info.shared_memory_explicit_layout) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
                 shared_var_info);
      NIR_PASS_V(nir, dxil_nir_split_unaligned_loads_stores, nir_var_mem_shared);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
   } else {
      NIR_PASS_V(nir, nir_split_struct_vars, nir_var_mem_shared);
      NIR_PASS_V(nir, dxil_nir_flatten_var_arrays, nir_var_mem_shared);
      NIR_PASS_V(nir, dxil_nir_lower_var_bit_size, nir_var_mem_shared,
                 conf->shader_model_max >= SHADER_MODEL_6_2 ? 16 : 32, 64);
   }

   NIR_PASS_V(nir, dxil_nir_lower_int_cubemaps, false);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   if (conf->yz_flip.mode != DXIL_SPIRV_YZ_FLIP_NONE) {
      assert(nir->info.stage == MESA_SHADER_VERTEX ||
             nir->info.stage == MESA_SHADER_GEOMETRY ||
             nir->info.stage == MESA_SHADER_TESS_EVAL);
      NIR_PASS_V(nir,
                 dxil_spirv_nir_lower_yz_flip,
                 conf, &metadata->requires_runtime_data);
   }

   if (metadata->requires_runtime_data) {
      add_runtime_data_var(nir, conf->runtime_data_cbv.register_space,
                           conf->runtime_data_cbv.base_shader_register);
   }

   if (push_constant_size > 0) {
      add_push_constant_var(nir, push_constant_size,
                            conf->push_constant_cbv.register_space,
                            conf->push_constant_cbv.base_shader_register);
   }

   NIR_PASS_V(nir, nir_lower_fp16_casts, nir_lower_fp16_all & ~nir_lower_fp16_rtz);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, dxil_nir_lower_double_math);

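   /* Run the usual NIR optimization loop until it stops making progress. */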
   {
      bool progress;
      do
      {
         progress = false;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
         NIR_PASS(progress, nir, nir_opt_deref);
         NIR_PASS(progress, nir, nir_opt_dce);
         NIR_PASS(progress, nir, nir_opt_undef);
         NIR_PASS(progress, nir, nir_opt_constant_folding);
         NIR_PASS(progress, nir, nir_opt_cse);
         if (nir_opt_loop(nir)) {
            progress = true;
            NIR_PASS(progress, nir, nir_copy_prop);
            NIR_PASS(progress, nir, nir_opt_dce);
         }
         NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
         NIR_PASS(progress, nir, nir_opt_algebraic);
         NIR_PASS(progress, nir, nir_opt_dead_cf);
         NIR_PASS(progress, nir, nir_opt_remove_phis);
      } while (progress);
   }

   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(nir, nir_split_struct_vars, nir_var_function_temp);
   NIR_PASS_V(nir, dxil_nir_flatten_var_arrays, nir_var_function_temp);
   NIR_PASS_V(nir, dxil_nir_lower_var_bit_size, nir_var_function_temp,
              conf->shader_model_max >= SHADER_MODEL_6_2 ? 16 : 32, 64);

   NIR_PASS_V(nir, nir_lower_doubles, NULL, nir->options->lower_doubles_options);

   if (conf->declared_read_only_images_as_srvs)
      NIR_PASS_V(nir, nir_lower_readonly_images_to_tex, true);
   nir_lower_tex_options lower_tex_options = {
      .lower_txp = UINT32_MAX,
      .lower_invalid_implicit_lod = true,
      .lower_tg4_offsets = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);

   NIR_PASS_V(nir, dxil_nir_split_clip_cull_distance);
   const struct dxil_nir_lower_loads_stores_options loads_stores_options = {
      .use_16bit_ssbo = conf->shader_model_max >= SHADER_MODEL_6_2,
   };
   NIR_PASS_V(nir, dxil_nir_lower_loads_stores_to_dxil, &loads_stores_options);
   NIR_PASS_V(nir, dxil_nir_split_typed_samplers);
   NIR_PASS_V(nir, dxil_nir_lower_ubo_array_one_to_static);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_uniform | nir_var_shader_in | nir_var_shader_out,
              NULL);
   NIR_PASS_V(nir, merge_ubos_and_ssbos);

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      dxil_sort_ps_outputs(nir);
   } else {
      /* Dummy linking step so we get different driver_location
       * assigned even if there's just a single vertex shader in the
       * pipeline. The real linking happens in dxil_spirv_nir_link().
       */
      dxil_reassign_driver_locations(nir, nir_var_shader_out, 0, NULL);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable_with_modes(var, nir, nir_var_shader_in) {
         /* spirv_to_dxil() only emits generic vertex attributes. */
         assert(var->data.location >= VERT_ATTRIB_GENERIC0);
         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
      }

      dxil_sort_by_driver_location(nir, nir_var_shader_in);
   } else {
      dxil_reassign_driver_locations(nir, nir_var_shader_in, 0, NULL);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   glsl_type_singleton_decref();
}