/* -*- mesa-c++ -*-
 * Copyright 2022 Collabora LTD
 * Author: Gert Wollny <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#include "sfn_shader_vs.h"

#include "../r600_asm.h"

#include "sfn_debug.h"
#include "sfn_instr_alugroup.h"
#include "sfn_instr_export.h"

namespace r600 {

uint32_t
VertexStageShader::enabled_stream_buffers_mask() const
{
   return m_enabled_stream_buffers_mask;
}

void
VertexStageShader::combine_enabled_stream_buffers_mask(uint32_t mask)
{
   m_enabled_stream_buffers_mask = mask;
}

bool
VertexExportStage::store_output(nir_intrinsic_instr& intr)
{
   auto index = nir_src_as_const_value(intr.src[1]);
   assert(index && "Indirect outputs not supported");

   const store_loc store_info = {nir_intrinsic_component(&intr),
                                 nir_intrinsic_io_semantics(&intr).location,
                                 (unsigned)nir_intrinsic_base(&intr) + index->u32,
                                 0};

   return do_store_output(store_info, intr);
}

VertexExportStage::VertexExportStage(VertexStageShader *parent):
    m_parent(parent)
{
}

VertexExportForFs::VertexExportForFs(VertexStageShader *parent,
                                     const pipe_stream_output_info *so_info,
                                     const r600_shader_key& key):
    VertexExportStage(parent),
    m_vs_as_gs_a(key.vs.as_gs_a),
    m_so_info(so_info)
{
}

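/* Dispatch a store_output intrinsic based on the varying slot: position-like
 * slots (POS, PSIZ, EDGE, LAYER, VIEWPORT and the clip distances) go through
 * the position export path, everything else becomes a parameter export, and
 * some slots are emitted on both paths. */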
bool
VertexExportForFs::do_store_output(const store_loc& store_info, nir_intrinsic_instr& intr)
{
   switch (store_info.location) {

   case VARYING_SLOT_PSIZ:
      m_writes_point_size = true;
      FALLTHROUGH;
   case VARYING_SLOT_POS:
      return emit_varying_pos(store_info, intr);
   case VARYING_SLOT_EDGE: {
      std::array<uint8_t, 4> swizzle_override = {7, 0, 7, 7};
      return emit_varying_pos(store_info, intr, &swizzle_override);
   }
   case VARYING_SLOT_VIEWPORT: {
      std::array<uint8_t, 4> swizzle_override = {7, 7, 7, 0};
      return emit_varying_pos(store_info, intr, &swizzle_override) &&
             emit_varying_param(store_info, intr);
   }
   case VARYING_SLOT_CLIP_VERTEX:
      return emit_clip_vertices(store_info, intr);
   case VARYING_SLOT_CLIP_DIST0:
   case VARYING_SLOT_CLIP_DIST1: {
      bool success = emit_varying_pos(store_info, intr);
      m_num_clip_dist += 4;
      if (!nir_intrinsic_io_semantics(&intr).no_varying)
         success &= emit_varying_param(store_info, intr);
      return success;
   }
   case VARYING_SLOT_LAYER: {
      m_out_misc_write = 1;
      m_vs_out_layer = 1;
      std::array<uint8_t, 4> swz = {7, 7, 0, 7};
      return emit_varying_pos(store_info, intr, &swz) &&
             emit_varying_param(store_info, intr);
   }
   case VARYING_SLOT_VIEW_INDEX:
      return emit_varying_pos(store_info, intr) && emit_varying_param(store_info, intr);

   default:
      return emit_varying_param(store_info, intr);
   }

   return false;
}

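/* VARYING_SLOT_CLIP_VERTEX: remember the full clip-vertex value, flag all
 * eight clip/cull distances as written, and record the register as the value
 * backing this output's driver location. */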
bool
VertexExportForFs::emit_clip_vertices(const store_loc& store_info,
                                      const nir_intrinsic_instr& instr)
{
   auto& vf = m_parent->value_factory();

   m_cc_dist_mask = 0xff;
   m_clip_dist_write = 0xff;

   m_clip_vertex = vf.src_vec4(instr.src[store_info.data_loc], pin_group, {0, 1, 2, 3});

   m_output_registers[nir_intrinsic_base(&instr)] = &m_clip_vertex;

   return true;
}

void
VertexExportForFs::get_shader_info(r600_shader *sh_info) const
{
   sh_info->cc_dist_mask = m_cc_dist_mask;
   sh_info->clip_dist_write = m_clip_dist_write;
   sh_info->vs_as_gs_a = m_vs_as_gs_a;
   sh_info->vs_out_edgeflag = m_out_edgeflag;
   sh_info->vs_out_viewport = m_out_viewport;
   sh_info->vs_out_misc_write = m_out_misc_write;
   sh_info->vs_out_point_size = m_out_point_size;
   sh_info->vs_out_layer = m_vs_out_layer;
}

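/* Finalize the export block: when vs_as_gs_a is set, export the primitive ID
 * as an extra parameter; make sure at least one position and one parameter
 * export exist, flag the last export of each kind, and emit stream-out
 * instructions if transform feedback outputs are present. */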
void
VertexExportForFs::finalize()
{
   if (m_vs_as_gs_a) {
      auto primid = m_parent->value_factory().temp_vec4(pin_group, {2, 7, 7, 7});
      m_parent->emit_instruction(new AluInstr(
         op1_mov, primid[0], m_parent->primitive_id(), AluInstr::last_write));
      int param = m_last_param_export ? m_last_param_export->location() + 1 : 0;

      m_last_param_export = new ExportInstr(ExportInstr::param, param, primid);
      m_parent->emit_instruction(m_last_param_export);

      ShaderOutput output(m_parent->noutputs(), 1, VARYING_SLOT_PRIMITIVE_ID);
      output.set_export_param(param);
      m_parent->add_output(output);
   }

   if (!m_last_pos_export) {
      RegisterVec4 value(0, false, {7, 7, 7, 7});
      m_last_pos_export = new ExportInstr(ExportInstr::pos, 0, value);
      m_parent->emit_instruction(m_last_pos_export);
   }

   if (!m_last_param_export) {
      RegisterVec4 value(0, false, {7, 7, 7, 7});
      m_last_param_export = new ExportInstr(ExportInstr::param, 0, value);
      m_parent->emit_instruction(m_last_param_export);
   }

   m_last_pos_export->set_is_last_export(true);
   m_last_param_export->set_is_last_export(true);

   if (m_so_info && m_so_info->num_outputs)
      emit_stream(-1);
}

void
VertexShader::do_get_shader_info(r600_shader *sh_info)
{
   sh_info->processor_type = PIPE_SHADER_VERTEX;
   m_export_stage->get_shader_info(sh_info);
}

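/* Emit a position-type export. The export slot depends on the varying slot:
 * slot 0 is the position, slot 1 carries the misc vector (point size, edge
 * flag, layer, viewport), and the clip distances use successive slots tracked
 * by m_cur_clip_pos. The edge flag is clamped and converted to an integer
 * before being exported. */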
bool
VertexExportForFs::emit_varying_pos(const store_loc& store_info,
                                    nir_intrinsic_instr& intr,
                                    std::array<uint8_t, 4> *swizzle_override)
{
   RegisterVec4::Swizzle swizzle;
   uint32_t write_mask = 0;

   write_mask = nir_intrinsic_write_mask(&intr) << store_info.frac;

   if (!swizzle_override) {
      for (int i = 0; i < 4; ++i)
         swizzle[i] = ((1 << i) & write_mask) ? i - store_info.frac : 7;
   } else
      std::copy(swizzle_override->begin(), swizzle_override->end(), swizzle.begin());

   int export_slot = 0;

   auto in_value = m_parent->value_factory().src_vec4(intr.src[0], pin_group, swizzle);
   auto& value = in_value;
   RegisterVec4 out_value = m_parent->value_factory().temp_vec4(pin_group, swizzle);

   switch (store_info.location) {
   case VARYING_SLOT_EDGE: {
      m_out_misc_write = true;
      m_out_edgeflag = true;
      auto src = m_parent->value_factory().src(intr.src[0], 0);
      auto clamped = m_parent->value_factory().temp_register();
      m_parent->emit_instruction(
         new AluInstr(op1_mov, clamped, src, {alu_write, alu_dst_clamp, alu_last_instr}));
      auto alu =
         new AluInstr(op1_flt_to_int, out_value[1], clamped, AluInstr::last_write);
      if (m_parent->chip_class() < ISA_CC_EVERGREEN)
         alu->set_alu_flag(alu_is_trans);
      m_parent->emit_instruction(alu);

      value = out_value;
   }
      FALLTHROUGH;
   case VARYING_SLOT_PSIZ:
      m_out_misc_write = true;
      m_out_point_size = true;
      FALLTHROUGH;
   case VARYING_SLOT_LAYER:
      export_slot = 1;
      break;
   case VARYING_SLOT_VIEWPORT:
      m_out_misc_write = true;
      m_out_viewport = true;
      export_slot = 1;
      break;
   case VARYING_SLOT_POS:
      break;
   case VARYING_SLOT_CLIP_DIST0:
   case VARYING_SLOT_CLIP_DIST1:
      m_cc_dist_mask |= write_mask
                        << (4 * (store_info.location - VARYING_SLOT_CLIP_DIST0));
      m_clip_dist_write |= write_mask
                           << (4 * (store_info.location - VARYING_SLOT_CLIP_DIST0));
      export_slot = m_cur_clip_pos++;
      break;
   default:
      sfn_log << SfnLog::err << __func__ << "Unsupported location " << store_info.location
              << "\n";
      return false;
   }

   m_last_pos_export = new ExportInstr(ExportInstr::pos, export_slot, value);

   m_output_registers[nir_intrinsic_base(&intr)] = &m_last_pos_export->value();

   m_parent->emit_instruction(m_last_pos_export);

   return true;
}

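/* Emit a parameter export: copy the written components into a temporary
 * vector and export it to the parameter slot that was assigned to this
 * output (export_param of the corresponding ShaderOutput). */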
bool
VertexExportForFs::emit_varying_param(const store_loc& store_info,
                                      nir_intrinsic_instr& intr)
{
   sfn_log << SfnLog::io << __func__ << ": emit DDL: " << store_info.driver_location
           << "\n";

   int write_mask = nir_intrinsic_write_mask(&intr) << store_info.frac;
   RegisterVec4::Swizzle swizzle;
   for (int i = 0; i < 4; ++i)
      swizzle[i] = ((1 << i) & write_mask) ? i - store_info.frac : 7;

   Pin pin = util_bitcount(write_mask) > 1 ? pin_group : pin_free;

   int export_slot = m_parent->output(nir_intrinsic_base(&intr)).export_param();
   assert(export_slot >= 0);
   auto value = m_parent->value_factory().temp_vec4(pin, swizzle);

   AluInstr *alu = nullptr;
   for (int i = 0; i < 4; ++i) {
      if (swizzle[i] < 4) {
         alu = new AluInstr(op1_mov,
                            value[i],
                            m_parent->value_factory().src(intr.src[0], swizzle[i]),
                            AluInstr::write);
         m_parent->emit_instruction(alu);
      }
   }
   if (alu)
      alu->set_alu_flag(alu_last_instr);

   m_last_param_export = new ExportInstr(ExportInstr::param, export_slot, value);
   m_output_registers[nir_intrinsic_base(&intr)] = &m_last_param_export->value();

   m_parent->emit_instruction(m_last_param_export);

   return true;
}

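/* Emit stream-out (transform feedback) instructions. A stream value of -1
 * writes all outputs. Values whose components are not laid out as required
 * by the write mask are first copied into a correctly swizzled temporary,
 * and the mask of buffers actually written is handed back to the shader. */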
bool
VertexExportForFs::emit_stream(int stream)
{
   assert(m_so_info);
   if (m_so_info->num_outputs > PIPE_MAX_SO_OUTPUTS) {
      R600_ASM_ERR("Too many stream outputs: %d\n", m_so_info->num_outputs);
      return false;
   }
   for (unsigned i = 0; i < m_so_info->num_outputs; i++) {
      if (m_so_info->output[i].output_buffer >= 4) {
         R600_ASM_ERR("Exceeded the max number of stream output buffers, got: %d\n",
                      m_so_info->output[i].output_buffer);
         return false;
      }
   }
   const RegisterVec4 *so_gpr[PIPE_MAX_SHADER_OUTPUTS];
   unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
   std::vector<RegisterVec4> tmp(m_so_info->num_outputs);

   /* Initialize locations where the outputs are stored. */
   for (unsigned i = 0; i < m_so_info->num_outputs; i++) {
      if (stream != -1 && stream != m_so_info->output[i].stream)
         continue;

      sfn_log << SfnLog::instr << "Emit stream " << i << " with register index "
              << m_so_info->output[i].register_index << " so_gpr:";

      so_gpr[i] = output_register(m_so_info->output[i].register_index);

      if (!so_gpr[i]) {
         sfn_log << SfnLog::err << "\nERR: register index "
                 << m_so_info->output[i].register_index
                 << " doesn't correspond to an output register\n";
         return false;
      }
      start_comp[i] = m_so_info->output[i].start_component;
      /* Lower outputs with dst_offset < start_component.
       *
       * We can only output 4D vectors with a write mask, e.g. we can
       * only output the W component at offset 3, etc. If we want
       * to store Y, Z, or W at buffer offset 0, we need to use MOV
       * to move it to X and output X. */

      bool need_copy =
         m_so_info->output[i].dst_offset < m_so_info->output[i].start_component;

      int sc = m_so_info->output[i].start_component;
      for (int j = 0; j < m_so_info->output[i].num_components; j++) {
         if ((*so_gpr[i])[j + sc]->chan() != j + sc) {
            need_copy = true;
            break;
         }
      }
      if (need_copy) {
         RegisterVec4::Swizzle swizzle = {0, 1, 2, 3};
         for (auto j = m_so_info->output[i].num_components; j < 4; ++j)
            swizzle[j] = 7;
         tmp[i] = m_parent->value_factory().temp_vec4(pin_group, swizzle);

         AluInstr *alu = nullptr;
         for (int j = 0; j < m_so_info->output[i].num_components; j++) {
            alu = new AluInstr(op1_mov, tmp[i][j], (*so_gpr[i])[j + sc], {alu_write});
            m_parent->emit_instruction(alu);
         }
         if (alu)
            alu->set_alu_flag(alu_last_instr);

         start_comp[i] = 0;
         so_gpr[i] = &tmp[i];
      }
      sfn_log << SfnLog::instr << *so_gpr[i] << "\n";
   }

   uint32_t enabled_stream_buffers_mask = 0;
   /* Write outputs to buffers. */
   for (unsigned i = 0; i < m_so_info->num_outputs; i++) {
      sfn_log << SfnLog::instr << "Write output buffer " << i << " with register index "
              << m_so_info->output[i].register_index << "\n";

      auto out_stream =
         new StreamOutInstr(*so_gpr[i],
                            m_so_info->output[i].num_components,
                            m_so_info->output[i].dst_offset - start_comp[i],
                            ((1 << m_so_info->output[i].num_components) - 1)
                               << start_comp[i],
                            m_so_info->output[i].output_buffer,
                            m_so_info->output[i].stream);
      m_parent->emit_instruction(out_stream);
      enabled_stream_buffers_mask |= (1 << m_so_info->output[i].output_buffer)
                                     << m_so_info->output[i].stream * 4;
   }
   m_parent->combine_enabled_stream_buffers_mask(enabled_stream_buffers_mask);
   return true;
}

const RegisterVec4 *
VertexExportForFs::output_register(int loc) const
{
   const RegisterVec4 *retval = nullptr;
   auto val = m_output_registers.find(loc);
   if (val != m_output_registers.end())
      retval = val->second;
   return retval;
}

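/* The export stage is chosen from the shader key: VS-as-ES feeds a geometry
 * shader through the ring buffer, VS-as-LS feeds the tessellation control
 * shader, otherwise the outputs are exported directly for the fragment
 * shader. */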
VertexShader::VertexShader(const pipe_stream_output_info *so_info,
                           r600_shader *gs_shader,
                           const r600_shader_key& key):
    VertexStageShader("VS", key.vs.first_atomic_counter),
    m_vs_as_gs_a(key.vs.as_gs_a)
{
   if (key.vs.as_es)
      m_export_stage = new VertexExportForGS(this, gs_shader);
   else if (key.vs.as_ls)
      m_export_stage = new VertexExportForTCS(this);
   else
      m_export_stage = new VertexExportForFs(this, so_info, key);
}

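/* Scan pass: track the highest vertex attribute register, register the shader
 * outputs, and note which system values (vertex/instance/primitive ID,
 * relative patch ID) are used so registers can be reserved for them later. */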
bool
VertexShader::do_scan_instruction(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   auto intr = nir_instr_as_intrinsic(instr);

   switch (intr->intrinsic) {
   case nir_intrinsic_load_input: {
      int vtx_register = nir_intrinsic_base(intr) + 1;
      if (m_last_vertex_attribute_register < vtx_register)
         m_last_vertex_attribute_register = vtx_register;
      return true;
   }
   case nir_intrinsic_store_output: {
      auto location = static_cast<gl_varying_slot>(nir_intrinsic_io_semantics(intr).location);

      if (nir_intrinsic_io_semantics(intr).no_varying &&
          (location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1)) {
         break;
      }

      int driver_location = nir_intrinsic_base(intr);

      int write_mask =
         location == VARYING_SLOT_LAYER ? 1 << 2 : nir_intrinsic_write_mask(intr);

      ShaderOutput output(driver_location, write_mask, location);

      add_output(output);
      break;
   }
   case nir_intrinsic_load_vertex_id:
      m_sv_values.set(es_vertexid);
      break;
   case nir_intrinsic_load_instance_id:
      m_sv_values.set(es_instanceid);
      break;
   case nir_intrinsic_load_primitive_id:
      m_sv_values.set(es_primitive_id);
      break;
   case nir_intrinsic_load_tcs_rel_patch_id_r600:
      m_sv_values.set(es_rel_patch_id);
      break;
   default:
      return false;
   }

   return true;
}

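/* Vertex attribute values are expected in fixed GPRs (index
 * driver_location + 1, presumably filled by the vertex fetch), so the NIR
 * destination components are simply bound to those pinned registers. */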
bool
VertexShader::load_input(nir_intrinsic_instr *intr)
{
   unsigned driver_location = nir_intrinsic_base(intr);
   unsigned location = nir_intrinsic_io_semantics(intr).location;
   auto& vf = value_factory();

   AluInstr *ir = nullptr;
   if (location < VERT_ATTRIB_MAX) {
      for (unsigned i = 0; i < intr->def.num_components; ++i) {
         auto src = vf.allocate_pinned_register(driver_location + 1, i);
         src->set_flag(Register::ssa);
         vf.inject_value(intr->def, i, src);
      }
      if (ir)
         ir->set_alu_flag(alu_last_instr);

      ShaderInput input(driver_location);
      input.set_gpr(driver_location + 1);
      add_input(input);
      return true;
   }
   fprintf(stderr, "r600-NIR: Unimplemented load_deref for %d\n", location);
   return false;
}

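/* Reserve the pinned registers that hold the used system values in GPR 0
 * (x: vertex ID, y: relative patch ID, z: primitive ID, w: instance ID) and
 * return the first GPR available after the vertex attributes. */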
int
VertexShader::do_allocate_reserved_registers()
{
   if (m_sv_values.test(es_vertexid)) {
      m_vertex_id = value_factory().allocate_pinned_register(0, 0);
   }

   if (m_sv_values.test(es_instanceid)) {
      m_instance_id = value_factory().allocate_pinned_register(0, 3);
   }

   if (m_sv_values.test(es_primitive_id) || m_vs_as_gs_a) {
      auto primitive_id = value_factory().allocate_pinned_register(0, 2);
      set_primitive_id(primitive_id);
   }

   if (m_sv_values.test(es_rel_patch_id)) {
      m_rel_vertex_id = value_factory().allocate_pinned_register(0, 1);
   }

   return m_last_vertex_attribute_register + 1;
}

bool
VertexShader::store_output(nir_intrinsic_instr *intr)
{
   return m_export_stage->store_output(*intr);
}

bool
VertexShader::process_stage_intrinsic(nir_intrinsic_instr *intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      return emit_simple_mov(intr->def, 0, m_vertex_id);
   case nir_intrinsic_load_instance_id:
      return emit_simple_mov(intr->def, 0, m_instance_id);
   case nir_intrinsic_load_primitive_id:
      return emit_simple_mov(intr->def, 0, primitive_id());
   case nir_intrinsic_load_tcs_rel_patch_id_r600:
      return emit_simple_mov(intr->def, 0, m_rel_vertex_id);
   default:
      return false;
   }
}

void
VertexShader::do_finalize()
{
   m_export_stage->finalize();
}

bool
VertexShader::read_prop(std::istream& is)
{
   (void)is;
   return false;
}

void
VertexShader::do_print_properties(std::ostream& os) const
{
   (void)os;
}

VertexExportForGS::VertexExportForGS(VertexStageShader *parent,
                                     const r600_shader *gs_shader):
    VertexExportStage(parent),
    m_gs_shader(gs_shader)
{
}

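/* When the VS feeds a GS, outputs are not exported but written to the ring
 * buffer at the offset where the GS expects the matching input; outputs the
 * GS does not consume are silently dropped. */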
bool
VertexExportForGS::do_store_output(const store_loc& store_info,
                                   nir_intrinsic_instr& instr)
{
   int ring_offset = -1;
   auto out_io = m_parent->output(store_info.driver_location);

   sfn_log << SfnLog::io << "check output " << store_info.driver_location
           << " varying_slot=" << static_cast<int>(out_io.varying_slot()) << "\n";

   for (unsigned k = 0; k < m_gs_shader->ninput; ++k) {
      auto& in_io = m_gs_shader->input[k];
      sfn_log << SfnLog::io << " against " << k
              << " varying_slot=" << static_cast<int>(in_io.varying_slot) << "\n";

      if (in_io.varying_slot == out_io.varying_slot()) {
         ring_offset = in_io.ring_offset;
         break;
      }
   }

   if (store_info.location == VARYING_SLOT_VIEWPORT) {
      m_vs_out_viewport = 1;
      m_vs_out_misc_write = 1;
      return true;
   }

   if (ring_offset == -1) {
      sfn_log << SfnLog::warn << "VS defines output at "
              << store_info.driver_location
              << " varying_slot=" << static_cast<int>(out_io.varying_slot())
              << " that is not consumed as GS input\n";
      return true;
   }

   RegisterVec4::Swizzle src_swz = {7, 7, 7, 7};
   for (int i = 0; i < 4; ++i)
      src_swz[i] = i < instr.num_components ? i : 7;

   auto value = m_parent->value_factory().temp_vec4(pin_chgr, src_swz);

   AluInstr *ir = nullptr;
   for (unsigned int i = 0; i < instr.num_components; ++i) {
      ir = new AluInstr(op1_mov,
                        value[i],
                        m_parent->value_factory().src(instr.src[store_info.data_loc], i),
                        AluInstr::write);
      m_parent->emit_instruction(ir);
   }
   if (ir)
      ir->set_alu_flag(alu_last_instr);

   m_parent->emit_instruction(new MemRingOutInstr(
      cf_mem_ring, MemRingOutInstr::mem_write, value, ring_offset >> 2, 4, nullptr));

   if (store_info.location == VARYING_SLOT_CLIP_DIST0 ||
       store_info.location == VARYING_SLOT_CLIP_DIST1)
      m_num_clip_dist += 4;

   return true;
}

void
VertexExportForGS::finalize()
{
}

void
VertexExportForGS::get_shader_info(r600_shader *sh_info) const
{
   sh_info->vs_out_viewport = m_vs_out_viewport;
   sh_info->vs_out_misc_write = m_vs_out_misc_write;
   sh_info->vs_as_es = true;
}

VertexExportForTCS::VertexExportForTCS(VertexStageShader *parent):
    VertexExportStage(parent)
{
}

void
VertexExportForTCS::finalize()
{
}

void
VertexExportForTCS::get_shader_info(r600_shader *sh_info) const
{
   sh_info->vs_as_ls = 1;
}

bool
VertexExportForTCS::do_store_output(const store_loc& store_info,
                                    nir_intrinsic_instr& intr)
{
   (void)store_info;
   (void)intr;
   return true;
}

} // namespace r600