1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
/*
 * This is ported mostly out of radeonsi; if we can drop TGSI, we can likely
 * make a lot of this go away.
 */
29
30 #include "nir_to_tgsi_info.h"
31 #include "util/u_math.h"
32 #include "util/u_prim.h"
33 #include "nir.h"
34 #include "nir_deref.h"
35 #include "tgsi/tgsi_scan.h"
36 #include "tgsi/tgsi_from_mesa.h"
37
tex_get_texture_var(const nir_tex_instr * instr)38 static nir_variable* tex_get_texture_var(const nir_tex_instr *instr)
39 {
40 for (unsigned i = 0; i < instr->num_srcs; i++) {
41 switch (instr->src[i].src_type) {
42 case nir_tex_src_texture_deref:
43 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
44 default:
45 break;
46 }
47 }
48
49 return NULL;
50 }
51
/* Recursively walk the tail of a deref path (everything below the variable
 * itself) and OR `mask` into usage_mask[] at the slot(s) the path resolves to.
 *
 * location   - starting vec4 slot (the variable's driver_location plus any
 *              offsets accumulated from outer derefs)
 * mask       - component usage bits; bits 4-7 spill into the following slot
 *              (used for dual-slot 64-bit values, see gather_usage())
 * usage_mask - per-slot 4-bit component masks, indexed by slot
 */
static void gather_usage_helper(const nir_deref_instr **deref_ptr,
                                unsigned location,
                                uint8_t mask,
                                uint8_t *usage_mask)
{
   for (; *deref_ptr; deref_ptr++) {
      const nir_deref_instr *deref = *deref_ptr;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         /* Compact arrays (e.g. clip/cull distances) pack 4 scalars per
          * vec4 slot instead of one array element per slot. */
         bool is_compact = nir_deref_instr_get_variable(deref)->data.compact;
         unsigned elem_size = is_compact ? DIV_ROUND_UP(glsl_get_length(deref->type), 4) :
            glsl_count_attribute_slots(deref->type, false);
         if (nir_src_is_const(deref->arr.index)) {
            if (is_compact) {
               /* Index selects a scalar: advance a slot per 4 elements and
                * shift the component mask by the remainder. */
               location += nir_src_as_uint(deref->arr.index) / 4;
               mask <<= nir_src_as_uint(deref->arr.index) % 4;
            } else
               location += elem_size * nir_src_as_uint(deref->arr.index);
         } else {
            /* Indirect index: conservatively mark every element as used.
             * deref_ptr[-1] is the parent deref whose type is the array. */
            unsigned array_elems =
               glsl_get_length(deref_ptr[-1]->type);
            for (unsigned i = 0; i < array_elems; i++) {
               gather_usage_helper(deref_ptr + 1,
                                   location + elem_size * i,
                                   mask, usage_mask);
            }
            return;
         }
         break;
      }
      case nir_deref_type_struct: {
         /* Skip over the slots occupied by all fields preceding the
          * accessed one. */
         const struct glsl_type *parent_type =
            deref_ptr[-1]->type;
         unsigned index = deref->strct.index;
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            location += glsl_count_attribute_slots(ft, false);
         }
         break;
      }
      default:
         unreachable("Unhandled deref type in gather_components_used_helper");
      }
   }

   usage_mask[location] |= mask & 0xf;
   /* High nibble belongs to the second slot of a dual-slot 64-bit value. */
   if (mask & 0xf0)
      usage_mask[location + 1] |= (mask >> 4) & 0xf;
}
101
/* Record which components of an input/output variable a load/store deref
 * touches.  `mask` is the component read/write mask of the access; it is
 * shifted by the variable's location_frac (and doubled up for 64-bit types,
 * where each logical component occupies two slots' worth of bits) before
 * being accumulated into usage_mask[] via gather_usage_helper().
 */
static void gather_usage(const nir_deref_instr *deref,
                         uint8_t mask,
                         uint8_t *usage_mask)
{
   nir_deref_path path;
   nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);

   unsigned location_frac = path.path[0]->var->data.location_frac;
   if (glsl_type_is_64bit(deref->type)) {
      /* Each 64-bit component consumes two mask bits. */
      uint8_t dbl_mask = 0;
      for (unsigned c = 0; c < 4; c++) {
         if (mask & (1u << c))
            dbl_mask |= 0x3 << (2 * c);
      }
      mask = dbl_mask << location_frac;
   } else {
      mask = (mask << location_frac) & 0xf;
   }

   /* path.path[0] is the variable deref; hand the helper everything below. */
   gather_usage_helper((const nir_deref_instr **)&path.path[1],
                       path.path[0]->var->data.driver_location,
                       mask, usage_mask);

   nir_deref_path_finish(&path);
}
128
/* Gather tgsi_shader_info state for a load of a shader input via deref:
 * per-component input usage (fragment shaders only) and varying-semantic
 * derived flags such as uses_frontface.
 */
static void gather_intrinsic_load_deref_info(const nir_shader *nir,
                                             const nir_intrinsic_instr *instr,
                                             const nir_deref_instr *deref,
                                             bool need_texcoord,
                                             const nir_variable *var,
                                             struct tgsi_shader_info *info)
{
   assert(var && var->data.mode == nir_var_shader_in);

   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      gather_usage(deref, nir_def_components_read(&instr->def),
                   info->input_usage_mask);

   /* Vertex shader inputs carry no varying semantics; nothing more to do. */
   if (nir->info.stage != MESA_SHADER_VERTEX) {
      unsigned semantic_name, semantic_index;
      tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
                                   &semantic_name, &semantic_index);

      if (semantic_name == TGSI_SEMANTIC_FACE)
         info->uses_frontface = true;
   }
}
159
/* Update per-instruction flags in `info` for one NIR instruction, mirroring
 * what tgsi_scan_shader() would derive from the equivalent TGSI opcode.
 */
static void scan_instruction(const struct nir_shader *nir,
                             bool need_texcoord,
                             struct tgsi_shader_info *info,
                             const nir_instr *instr)
{
   /* Seeing any instruction means "more than one"; consumers only care
    * about the 1-vs-more distinction. */
   info->num_instructions = 2;

   if (instr->type == nir_instr_type_tex) {
      const nir_tex_instr *tex = nir_instr_as_tex(instr);

      if (tex->op == nir_texop_tex)
         info->opcode_count[TGSI_OPCODE_TEX]++;
   } else if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_front_face:
         info->uses_frontface = 1;
         break;
      case nir_intrinsic_load_instance_id:
         info->uses_instanceid = 1;
         break;
      case nir_intrinsic_load_invocation_id:
         info->uses_invocationid = true;
         break;
      case nir_intrinsic_load_num_workgroups:
         info->uses_grid_size = true;
         break;
      case nir_intrinsic_load_vertex_id:
         info->uses_vertexid = 1;
         break;
      case nir_intrinsic_load_vertex_id_zero_base:
         info->uses_vertexid_nobase = 1;
         break;
      case nir_intrinsic_load_base_vertex:
         info->uses_basevertex = 1;
         break;
      case nir_intrinsic_load_primitive_id:
         info->uses_primid = 1;
         break;

      /* Every image/SSBO store or atomic counts as a memory write. */
      case nir_intrinsic_bindless_image_store:
      case nir_intrinsic_image_deref_store:
      case nir_intrinsic_image_store:
      case nir_intrinsic_bindless_image_atomic:
      case nir_intrinsic_bindless_image_atomic_swap:
      case nir_intrinsic_image_deref_atomic:
      case nir_intrinsic_image_deref_atomic_swap:
      case nir_intrinsic_image_atomic:
      case nir_intrinsic_image_atomic_swap:
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_ssbo_atomic:
      case nir_intrinsic_ssbo_atomic_swap:
         info->writes_memory = true;
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_vertex:
      case nir_intrinsic_load_deref: {
         const nir_variable *var = nir_intrinsic_get_var(intrin, 0);
         const nir_variable_mode mode = var->data.mode;
         nir_deref_instr *const deref = nir_src_as_deref(intrin->src[0]);

         if (nir_deref_instr_has_indirect(deref)) {
            if (mode == nir_var_shader_in)
               info->indirect_files |= (1 << TGSI_FILE_INPUT);
         }
         if (mode == nir_var_shader_in)
            gather_intrinsic_load_deref_info(nir, intrin, deref, need_texcoord,
                                             var, info);
         break;
      }
      default:
         break;
      }
   }
}
249
/* Scan a NIR shader and populate a tgsi_shader_info the way
 * tgsi_scan_shader() would for the equivalent TGSI, so drivers that consume
 * tgsi_shader_info can be fed NIR directly.
 *
 * \param nir            shader to scan (not modified)
 * \param info           output info; assumed zero-initialized by the caller
 * \param need_texcoord  forwarded to tgsi_get_gl_varying_semantic(): request
 *                       TEXCOORD/PCOORD semantics instead of GENERIC
 */
void nir_tgsi_scan_shader(const struct nir_shader *nir,
                          struct tgsi_shader_info *info,
                          bool need_texcoord)
{
   unsigned i;

   info->processor = pipe_shader_type_from_mesa(nir->info.stage);
   /* Start at 1; scan_instruction() bumps this to 2 on the first
    * instruction seen ("more than one" is all consumers check). */
   info->num_instructions = 1;

   info->properties[TGSI_PROPERTY_NEXT_SHADER] =
      pipe_shader_type_from_mesa(nir->info.next_stage);

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
         nir->info.vs.window_space_position;
   }

   if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
      info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
         nir->info.tess.tcs_vertices_out;
   }

   if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = u_tess_prim_from_shader(nir->info.tess._primitive_mode);

      /* The (spacing + 1) % 3 remap below relies on the two enums lining
       * up exactly this way; enforce it at compile time. */
      STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_ODD);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_EVEN);

      info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
      info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
      info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
      info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
      info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
      info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      /* post_depth_coverage implies early depth/stencil as well. */
      info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
         nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
      info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
      info->uses_fbfetch = nir->info.fs.uses_fbfetch_output;

      if (nir->info.fs.pixel_center_integer) {
         info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
            TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
      }

      if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
         /* Translate the GLSL frag-depth layout qualifier to TGSI. */
         switch (nir->info.fs.depth_layout) {
         case FRAG_DEPTH_LAYOUT_ANY:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
            break;
         case FRAG_DEPTH_LAYOUT_GREATER:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
            break;
         case FRAG_DEPTH_LAYOUT_LESS:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
            break;
         case FRAG_DEPTH_LAYOUT_UNCHANGED:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
            break;
         default:
            unreachable("Unknow depth layout");
         }
      }
   }

   if (gl_shader_stage_is_compute(nir->info.stage) ||
       gl_shader_stage_is_mesh(nir->info.stage)) {
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.workgroup_size[2];
   }

   /* Gather per-slot input semantics and interpolation modes. */
   i = 0;
   uint64_t processed_inputs = 0;
   nir_foreach_shader_in_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         /* Per-vertex arrayed IO: strip the outer per-vertex array level. */
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = nir_variable_count_slots(variable, type);

      i = variable->data.driver_location;

      /* Vertex shader inputs don't have semantics. The state
       * tracker has already mapped them to attributes via
       * variable->data.driver_location.
       */
      if (nir->info.stage == MESA_SHADER_VERTEX) {
         continue;
      }

      for (unsigned j = 0; j < attrib_count; j++, i++) {

         /* Only fill in each slot once, even if variables alias. */
         if (processed_inputs & ((uint64_t)1 << i))
            continue;

         processed_inputs |= ((uint64_t)1 << i);

         tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->input_semantic_name[i] = semantic_name;
         info->input_semantic_index[i] = semantic_index;

         if (semantic_name == TGSI_SEMANTIC_PRIMID)
            info->uses_primid = true;

         enum glsl_base_type base_type =
            glsl_get_base_type(glsl_without_array(variable->type));

         if (variable->data.centroid)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
         if (variable->data.sample)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;

         switch (variable->data.interpolation) {
         case INTERP_MODE_NONE:
            /* No explicit qualifier: integers and per-primitive values are
             * flat, colors follow the flat-shading state, everything else
             * defaults to smooth (perspective). */
            if (glsl_base_type_is_integer(base_type) || variable->data.per_primitive) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
               break;
            }

            if (semantic_name == TGSI_SEMANTIC_COLOR) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
               break;
            }
            FALLTHROUGH;

         case INTERP_MODE_SMOOTH:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;

         case INTERP_MODE_NOPERSPECTIVE:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
            break;

         case INTERP_MODE_FLAT:
            info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
            break;
         }
      }
   }

   info->num_inputs = nir->num_inputs;
   if (nir->info.io_lowered) {
      /* With lowered IO there are no input variables; count slots from the
       * inputs_read bitmask instead. */
      info->num_inputs = util_bitcount64(nir->info.inputs_read);
      if (nir->info.inputs_read_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_INPUT;
      info->file_max[TGSI_FILE_INPUT] = info->num_inputs - 1;
   } else {
      /* file_max is the highest slot index used by any input variable. */
      int max = info->file_max[TGSI_FILE_INPUT] = -1;
      nir_foreach_shader_in_variable(var, nir) {
         int slots = glsl_count_attribute_slots(var->type, false);
         int tmax = var->data.driver_location + slots - 1;
         if (tmax > max)
            max = tmax;
         info->file_max[TGSI_FILE_INPUT] = max;
      }
   }

   /* Gather per-slot output semantics, write masks and GS stream info. */
   i = 0;
   uint64_t processed_outputs = 0;
   unsigned num_outputs = 0;
   nir_foreach_shader_out_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      i = variable->data.driver_location;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         /* Per-vertex arrayed IO: strip the outer per-vertex array level. */
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = nir_variable_count_slots(variable, type);
      for (unsigned k = 0; k < attrib_count; k++, i++) {

         if (nir->info.stage == MESA_SHADER_FRAGMENT) {
            tgsi_get_gl_frag_result_semantic(variable->data.location + k,
                                             &semantic_name, &semantic_index);

            /* Adjust for dual source blending */
            if (variable->data.index > 0) {
               semantic_index++;
            }
         } else {
            tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
                                         &semantic_name, &semantic_index);
         }

         unsigned num_components = 4;
         unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
         if (vector_elements)
            num_components = vector_elements;

         unsigned component = variable->data.location_frac;
         if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
            /* 64-bit components take two 32-bit components each.  A
             * dual-slot value spills its upper components into the odd
             * slots (k % 2), starting at component 0 there. */
            if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
               num_components = (num_components * 2) - 4;
               component = 0;
            } else {
               num_components = MIN2(num_components * 2, 4);
            }
         }

         /* Build the TGSI write mask covering [component, component+n). */
         uint8_t usagemask = 0;
         for (unsigned j = component; j < num_components + component; j++) {
            switch (j) {
            case 0:
               usagemask |= TGSI_WRITEMASK_X;
               break;
            case 1:
               usagemask |= TGSI_WRITEMASK_Y;
               break;
            case 2:
               usagemask |= TGSI_WRITEMASK_Z;
               break;
            case 3:
               usagemask |= TGSI_WRITEMASK_W;
               break;
            default:
               unreachable("error calculating component index");
            }
         }

         /* GS vertex streams: either already packed as 2 bits per
          * component (NIR_STREAM_PACKED) or a single stream id to be
          * replicated across the written components. */
         unsigned gs_out_streams;
         if (variable->data.stream & NIR_STREAM_PACKED) {
            gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
         } else {
            assert(variable->data.stream < 4);
            gs_out_streams = 0;
            for (unsigned j = 0; j < num_components; ++j)
               gs_out_streams |= variable->data.stream << (2 * (component + j));
         }

         unsigned streamx = gs_out_streams & 3;
         unsigned streamy = (gs_out_streams >> 2) & 3;
         unsigned streamz = (gs_out_streams >> 4) & 3;
         unsigned streamw = (gs_out_streams >> 6) & 3;

         if (usagemask & TGSI_WRITEMASK_X) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_X;
            info->output_streams[i] |= streamx;
            info->num_stream_output_components[streamx]++;
         }
         if (usagemask & TGSI_WRITEMASK_Y) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
            info->output_streams[i] |= streamy << 2;
            info->num_stream_output_components[streamy]++;
         }
         if (usagemask & TGSI_WRITEMASK_Z) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
            info->output_streams[i] |= streamz << 4;
            info->num_stream_output_components[streamz]++;
         }
         if (usagemask & TGSI_WRITEMASK_W) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_W;
            info->output_streams[i] |= streamw << 6;
            info->num_stream_output_components[streamw]++;
         }

         /* make sure we only count this location once against
          * the num_outputs counter.
          */
         if (processed_outputs & ((uint64_t)1 << i))
            continue;

         processed_outputs |= ((uint64_t)1 << i);
         num_outputs++;

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;

         switch (semantic_name) {
         case TGSI_SEMANTIC_VIEWPORT_INDEX:
            info->writes_viewport_index = true;
            break;
         case TGSI_SEMANTIC_LAYER:
            info->writes_layer = true;
            break;
         case TGSI_SEMANTIC_PSIZE:
            info->writes_psize = true;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            info->writes_clipvertex = true;
            break;
         case TGSI_SEMANTIC_STENCIL:
            /* FB-fetch outputs are reads, not real stencil writes. */
            if (!variable->data.fb_fetch_output)
               info->writes_stencil = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->writes_samplemask = true;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->writes_edgeflag = true;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (info->processor == PIPE_SHADER_FRAGMENT) {
               if (!variable->data.fb_fetch_output)
                  info->writes_z = true;
            } else {
               info->writes_position = true;
            }
            break;
         }

         if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
            /* TCS outputs are readable by other invocations; classify the
             * kind of output that may be read back. */
            switch (semantic_name) {
            case TGSI_SEMANTIC_PATCH:
               info->reads_perpatch_outputs = true;
               break;
            case TGSI_SEMANTIC_TESSINNER:
            case TGSI_SEMANTIC_TESSOUTER:
               info->reads_tessfactor_outputs = true;
               break;
            default:
               info->reads_pervertex_outputs = true;
            }
         }
      }

      unsigned loc = variable->data.location;
      /* gl_FragColor (as opposed to gl_FragData[i]) broadcasts to every
       * color buffer. */
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          loc == FRAG_RESULT_COLOR &&
          nir->info.outputs_written & (1ull << loc)) {
         assert(attrib_count == 1);
         info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
      }
   }

   if (nir->info.io_lowered) {
      /* With lowered IO there are no output variables either; derive the
       * output table from the outputs_written bitmask, packing slots
       * densely in location order. */
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         unsigned i = util_bitcount64(nir->info.outputs_written &
                                      BITFIELD64_MASK(location));
         unsigned semantic_name, semantic_index;

         tgsi_get_gl_varying_semantic(location, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;
         /* Component-level masks are unknown here; assume xyzw. */
         info->output_usagemask[i] = 0xf;
      }
      num_outputs = util_bitcount64(nir->info.outputs_written);
      if (nir->info.outputs_accessed_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_OUTPUT;
   }

   info->num_outputs = num_outputs;

   info->const_file_max[0] = nir->num_uniforms - 1;
   /* Only the first 32 images/samplers are representable in these masks. */
   info->images_declared = nir->info.images_used[0];
   info->samplers_declared = nir->info.textures_used[0];

   info->file_max[TGSI_FILE_SAMPLER] = BITSET_LAST_BIT(nir->info.samplers_used) - 1;
   info->file_max[TGSI_FILE_SAMPLER_VIEW] = BITSET_LAST_BIT(nir->info.textures_used) - 1;
   info->file_mask[TGSI_FILE_SAMPLER] = nir->info.samplers_used[0];
   info->file_mask[TGSI_FILE_SAMPLER_VIEW] = nir->info.textures_used[0];
   info->file_max[TGSI_FILE_IMAGE] = BITSET_LAST_BIT(nir->info.images_used) - 1;
   info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;

   info->num_written_clipdistance = nir->info.clip_distance_array_size;
   info->num_written_culldistance = nir->info.cull_distance_array_size;

   if (info->processor == PIPE_SHADER_FRAGMENT)
      info->uses_kill = nir->info.fs.uses_discard;

   /* Walk every instruction of the (single) entry point to collect the
    * per-instruction flags. */
   nir_function *func = (struct nir_function *)
      exec_list_get_head_const(&nir->functions);

   nir_foreach_block(block, func->impl) {
      nir_foreach_instr(instr, block)
         scan_instruction(nir, need_texcoord, info, instr);
   }
}
645