1 /*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <[email protected]>
23 */
24
25 #include "compiler/nir/nir.h"
26 #include "compiler/nir/nir_builder.h"
27
28 #include "util/u_debug.h"
29 #include "util/u_prim.h"
30
31 #include "nv50_ir.h"
32 #include "nv50_ir_lowering_helper.h"
33 #include "nv50_ir_target.h"
34 #include "nv50_ir_util.h"
35 #include "tgsi/tgsi_from_mesa.h"
36
37 #include <unordered_map>
38 #include <cstring>
39 #include <list>
40 #include <vector>
41
42 namespace {
43
44 using namespace nv50_ir;
45
46 int
47 type_size(const struct glsl_type *type, bool bindless)
48 {
49 return glsl_count_attribute_slots(type, false);
50 }
51
52 bool
53 nv50_nir_lower_load_user_clip_plane_cb(nir_builder *b, nir_intrinsic_instr *intrin, void *params)
54 {
55 struct nv50_ir_prog_info *info = (struct nv50_ir_prog_info *)params;
56
57 if (intrin->intrinsic != nir_intrinsic_load_user_clip_plane)
58 return false;
59
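// each user clip plane occupies one vec4 (16 bytes) in the driver's auxiliary
// constant buffer, so replace the sysval with a UBO read at ucpBase + plane * 16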
60 uint16_t offset = info->io.ucpBase + nir_intrinsic_ucp_id(intrin) * 16;
61
62 b->cursor = nir_before_instr(&intrin->instr);
63 nir_def *replacement =
64 nir_load_ubo(b, 4, 32, nir_imm_int(b, info->io.auxCBSlot),
65 nir_imm_int(b, offset), .range = ~0u);
66
67 nir_def_replace(&intrin->def, replacement);
68
69 return true;
70 }
71
72 bool
73 nv50_nir_lower_load_user_clip_plane(nir_shader *nir, struct nv50_ir_prog_info *info) {
74 return nir_shader_intrinsics_pass(nir, nv50_nir_lower_load_user_clip_plane_cb,
75 nir_metadata_control_flow,
76 info);
77 }
78
79 class Converter : public BuildUtil
80 {
81 public:
82 Converter(Program *, nir_shader *, nv50_ir_prog_info *, nv50_ir_prog_info_out *);
83
84 bool run();
85 private:
86 typedef std::vector<LValue*> LValues;
87 typedef std::unordered_map<unsigned, LValues> NirDefMap;
88 typedef std::unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
89 typedef std::unordered_map<unsigned, BasicBlock*> NirBlockMap;
90
91 CacheMode convert(enum gl_access_qualifier);
92 TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
93 BasicBlock* convert(nir_block *);
94 SVSemantic convert(nir_intrinsic_op);
95 Value* convert(nir_load_const_instr*, uint8_t);
96 LValues& convert(nir_def *);
97
98 Value* getSrc(nir_alu_src *, uint8_t component = 0);
99 Value* getSrc(nir_src *, uint8_t, bool indirect = false);
100 Value* getSrc(nir_def *, uint8_t);
101
102 // The returned value is the constant part of the given source (either the
103 // nir_src or the selected source component of an intrinsic). Even though
104 // this is mostly an optimization to be able to skip indirects in a few
105 // cases, sometimes we require immediate values or set some fields on
106 // instructions (e.g. tex) in order for codegen to consume those.
107 // If the found value has no constant part, the Value gets returned
108 // through the Value reference parameter instead.
109 uint32_t getIndirect(nir_src *, uint8_t, Value *&);
110 // isScalar indicates that the addressing is scalar; vec4 addressing is
111 // assumed otherwise
112 uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
113 bool isScalar = false);
114
115 uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
116
117 uint8_t translateInterpMode(const struct nv50_ir_varying *var, operation& op);
118 void setInterpolate(nv50_ir_varying *,
119 uint8_t,
120 bool centroid,
121 unsigned semantics);
122
123 Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
124 uint8_t c, Value *indirect0 = NULL,
125 Value *indirect1 = NULL, bool patch = false,
126 CacheMode cache=CACHE_CA);
127 Instruction *loadVector(nir_intrinsic_instr *insn,
128 uint8_t buffer, Value *indirectBuffer,
129 uint32_t offset, Value *indirectOffset);
130 void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
131 Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
132 Value *indirect1 = NULL);
133 Instruction *storeVector(nir_intrinsic_instr *insn,
134 uint8_t buffer, Value *indirectBuffer,
135 uint32_t offset, Value *indirectOffset);
136
137 static bool memVectorizeCb(unsigned align_mul,
138 unsigned align_offset,
139 unsigned bit_size,
140 unsigned num_components,
141 nir_intrinsic_instr *low,
142 nir_intrinsic_instr *high,
143 void *cb_data);
144 static nir_mem_access_size_align
145 getMemAccessSizeAlign(nir_intrinsic_op intrin,
146 uint8_t bytes,
147 uint8_t bit_size,
148 uint32_t align_mul,
149 uint32_t align_offset,
150 bool offset_is_const,
151 const void *cb_data);
152
153 bool isFloatType(nir_alu_type);
154 bool isSignedType(nir_alu_type);
155 bool isResultFloat(nir_op);
156 bool isResultSigned(nir_op);
157
158 DataType getDType(nir_alu_instr *);
159 DataType getDType(nir_intrinsic_instr *);
160 DataType getDType(nir_op, uint8_t);
161
162 DataFile getFile(nir_intrinsic_op) const;
163
164 std::vector<DataType> getSTypes(nir_alu_instr *);
165 DataType getSType(nir_src &, bool isFloat, bool isSigned);
166
167 operation getOperation(nir_intrinsic_op);
168 operation getOperation(nir_op);
169 operation getOperation(nir_texop);
170 operation preOperationNeeded(nir_op);
171
172 int getSubOp(nir_intrinsic_op);
173 int getSubOp(nir_op);
174 int getAtomicSubOp(nir_atomic_op);
175
176 CondCode getCondCode(nir_op);
177
178 bool assignSlots();
179 bool parseNIR();
180
181 bool visit(nir_alu_instr *);
182 bool visit(nir_block *);
183 bool visit(nir_cf_node *);
184 bool visit(nir_function *);
185 bool visit(nir_if *);
186 bool visit(nir_instr *);
187 bool visit(nir_intrinsic_instr *);
188 bool visit(nir_jump_instr *);
189 bool visit(nir_load_const_instr*);
190 bool visit(nir_loop *);
191 bool visit(nir_undef_instr *);
192 bool visit(nir_tex_instr *);
193
194 static unsigned lowerBitSizeCB(const nir_instr *, void *);
195
196 // tex stuff
197 unsigned int getNIRArgCount(TexInstruction::Target&);
198
199 void runOptLoop();
200
201 struct nv50_ir_prog_info *info;
202 struct nv50_ir_prog_info_out *info_out;
203
204 nir_shader *nir;
205
206 NirDefMap ssaDefs;
207 NirDefMap regDefs;
208 ImmediateMap immediates;
209 NirBlockMap blocks;
210 unsigned int curLoopDepth;
211 unsigned int curIfDepth;
212
213 BasicBlock *exit;
214 Value *zero;
215 Instruction *immInsertPos;
216
217 Value *outBase; // base address of vertex out patch (for TCP)
218
219 union {
220 struct {
221 Value *position;
222 } fp;
223 };
224 };
225
226 Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info,
227 nv50_ir_prog_info_out *info_out)
228 : BuildUtil(prog),
229 info(info),
230 info_out(info_out),
231 nir(nir),
232 curLoopDepth(0),
233 curIfDepth(0),
234 exit(NULL),
235 immInsertPos(NULL),
236 outBase(nullptr)
237 {
238 zero = mkImm((uint32_t)0);
239 }
240
241 BasicBlock *
242 Converter::convert(nir_block *block)
243 {
244 NirBlockMap::iterator it = blocks.find(block->index);
245 if (it != blocks.end())
246 return it->second;
247
248 BasicBlock *bb = new BasicBlock(func);
249 blocks[block->index] = bb;
250 return bb;
251 }
252
253 bool
254 Converter::isFloatType(nir_alu_type type)
255 {
256 return nir_alu_type_get_base_type(type) == nir_type_float;
257 }
258
259 bool
260 Converter::isSignedType(nir_alu_type type)
261 {
262 return nir_alu_type_get_base_type(type) == nir_type_int;
263 }
264
265 bool
266 Converter::isResultFloat(nir_op op)
267 {
268 const nir_op_info &info = nir_op_infos[op];
269 if (info.output_type != nir_type_invalid)
270 return isFloatType(info.output_type);
271
272 ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
273 assert(false);
274 return true;
275 }
276
277 bool
278 Converter::isResultSigned(nir_op op)
279 {
280 switch (op) {
281 // there is no umul and we get wrong results if we treat all muls as signed
282 case nir_op_imul:
283 case nir_op_inot:
284 return false;
285 default:
286 const nir_op_info &info = nir_op_infos[op];
287 if (info.output_type != nir_type_invalid)
288 return isSignedType(info.output_type);
289 ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
290 assert(false);
291 return true;
292 }
293 }
294
295 DataType
296 Converter::getDType(nir_alu_instr *insn)
297 {
298 return getDType(insn->op, insn->def.bit_size);
299 }
300
301 DataType
302 Converter::getDType(nir_intrinsic_instr *insn)
303 {
304 bool isFloat, isSigned;
305 switch (insn->intrinsic) {
306 case nir_intrinsic_bindless_image_atomic:
307 case nir_intrinsic_global_atomic:
308 case nir_intrinsic_image_atomic:
309 case nir_intrinsic_shared_atomic:
310 case nir_intrinsic_ssbo_atomic: {
311 nir_alu_type type = nir_atomic_op_type(nir_intrinsic_atomic_op(insn));
312 isFloat = type == nir_type_float;
313 isSigned = type == nir_type_int;
314 break;
315 }
316 default:
317 isFloat = false;
318 isSigned = false;
319 break;
320 }
321
322 return typeOfSize(insn->def.bit_size / 8, isFloat, isSigned);
323 }
324
325 DataType
326 Converter::getDType(nir_op op, uint8_t bitSize)
327 {
328 DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
329 if (ty == TYPE_NONE) {
330 ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
331 assert(false);
332 }
333 return ty;
334 }
335
336 std::vector<DataType>
337 Converter::getSTypes(nir_alu_instr *insn)
338 {
339 const nir_op_info &info = nir_op_infos[insn->op];
340 std::vector<DataType> res(info.num_inputs);
341
342 for (uint8_t i = 0; i < info.num_inputs; ++i) {
343 if (info.input_types[i] != nir_type_invalid) {
344 res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
345 } else {
346 ERROR("getSType not implemented for %s idx %u\n", info.name, i);
347 assert(false);
348 res[i] = TYPE_NONE;
349 break;
350 }
351 }
352
353 return res;
354 }
355
356 DataType
357 Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
358 {
359 const uint8_t bitSize = src.ssa->bit_size;
360
361 DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
362 if (ty == TYPE_NONE) {
363 const char *str;
364 if (isFloat)
365 str = "float";
366 else if (isSigned)
367 str = "int";
368 else
369 str = "uint";
370 ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
371 assert(false);
372 }
373 return ty;
374 }
375
376 DataFile
377 Converter::getFile(nir_intrinsic_op op) const
378 {
379 switch (op) {
380 case nir_intrinsic_load_uniform:
381 case nir_intrinsic_load_ubo:
382 case nir_intrinsic_ldc_nv:
383 return FILE_MEMORY_CONST;
384 case nir_intrinsic_load_ssbo:
385 case nir_intrinsic_store_ssbo:
386 return FILE_MEMORY_BUFFER;
387 case nir_intrinsic_load_global:
388 case nir_intrinsic_store_global:
389 case nir_intrinsic_load_global_constant:
390 return FILE_MEMORY_GLOBAL;
391 case nir_intrinsic_load_scratch:
392 case nir_intrinsic_store_scratch:
393 return FILE_MEMORY_LOCAL;
394 case nir_intrinsic_load_shared:
395 case nir_intrinsic_store_shared:
396 return FILE_MEMORY_SHARED;
397 case nir_intrinsic_load_input:
398 case nir_intrinsic_load_interpolated_input:
399 case nir_intrinsic_load_kernel_input:
400 case nir_intrinsic_load_per_vertex_input:
401 return FILE_SHADER_INPUT;
402 case nir_intrinsic_load_output:
403 case nir_intrinsic_load_per_vertex_output:
404 case nir_intrinsic_store_output:
405 case nir_intrinsic_store_per_vertex_output:
406 return FILE_SHADER_OUTPUT;
407 default:
408 ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
409 assert(false);
410 }
411 return FILE_NULL;
412 }
413
414 operation
415 Converter::getOperation(nir_op op)
416 {
417 switch (op) {
418 // basic ops with float and int variants
419 case nir_op_fabs:
420 case nir_op_iabs:
421 return OP_ABS;
422 case nir_op_fadd:
423 case nir_op_iadd:
424 return OP_ADD;
425 case nir_op_iand:
426 return OP_AND;
427 case nir_op_ifind_msb:
428 case nir_op_ufind_msb:
429 return OP_BFIND;
430 case nir_op_fceil:
431 return OP_CEIL;
432 case nir_op_fcos:
433 return OP_COS;
434 case nir_op_f2f32:
435 case nir_op_f2f64:
436 case nir_op_f2i8:
437 case nir_op_f2i16:
438 case nir_op_f2i32:
439 case nir_op_f2i64:
440 case nir_op_f2u8:
441 case nir_op_f2u16:
442 case nir_op_f2u32:
443 case nir_op_f2u64:
444 case nir_op_i2f32:
445 case nir_op_i2f64:
446 case nir_op_i2i8:
447 case nir_op_i2i16:
448 case nir_op_i2i32:
449 case nir_op_i2i64:
450 case nir_op_u2f32:
451 case nir_op_u2f64:
452 case nir_op_u2u8:
453 case nir_op_u2u16:
454 case nir_op_u2u32:
455 case nir_op_u2u64:
456 return OP_CVT;
457 case nir_op_fdiv:
458 case nir_op_idiv:
459 case nir_op_udiv:
460 return OP_DIV;
461 case nir_op_fexp2:
462 return OP_EX2;
463 case nir_op_ffloor:
464 return OP_FLOOR;
465 case nir_op_ffma:
466 case nir_op_ffmaz:
467 /* No FMA op pre-nvc0 */
468 if (info->target < 0xc0)
469 return OP_MAD;
470 return OP_FMA;
471 case nir_op_flog2:
472 return OP_LG2;
473 case nir_op_fmax:
474 case nir_op_imax:
475 case nir_op_umax:
476 return OP_MAX;
477 case nir_op_pack_64_2x32_split:
478 return OP_MERGE;
479 case nir_op_fmin:
480 case nir_op_imin:
481 case nir_op_umin:
482 return OP_MIN;
483 case nir_op_fmod:
484 case nir_op_imod:
485 case nir_op_umod:
486 case nir_op_frem:
487 case nir_op_irem:
488 return OP_MOD;
489 case nir_op_fmul:
490 case nir_op_fmulz:
491 case nir_op_amul:
492 case nir_op_imul:
493 case nir_op_imul_high:
494 case nir_op_umul_high:
495 return OP_MUL;
496 case nir_op_fneg:
497 case nir_op_ineg:
498 return OP_NEG;
499 case nir_op_inot:
500 return OP_NOT;
501 case nir_op_ior:
502 return OP_OR;
503 case nir_op_frcp:
504 return OP_RCP;
505 case nir_op_frsq:
506 return OP_RSQ;
507 case nir_op_fsat:
508 return OP_SAT;
509 case nir_op_ieq8:
510 case nir_op_ige8:
511 case nir_op_uge8:
512 case nir_op_ilt8:
513 case nir_op_ult8:
514 case nir_op_ine8:
515 case nir_op_ieq16:
516 case nir_op_ige16:
517 case nir_op_uge16:
518 case nir_op_ilt16:
519 case nir_op_ult16:
520 case nir_op_ine16:
521 case nir_op_feq32:
522 case nir_op_ieq32:
523 case nir_op_fge32:
524 case nir_op_ige32:
525 case nir_op_uge32:
526 case nir_op_flt32:
527 case nir_op_ilt32:
528 case nir_op_ult32:
529 case nir_op_fneu32:
530 case nir_op_ine32:
531 return OP_SET;
532 case nir_op_ishl:
533 return OP_SHL;
534 case nir_op_ishr:
535 case nir_op_ushr:
536 return OP_SHR;
537 case nir_op_fsin:
538 return OP_SIN;
539 case nir_op_fsqrt:
540 return OP_SQRT;
541 case nir_op_ftrunc:
542 return OP_TRUNC;
543 case nir_op_ixor:
544 return OP_XOR;
545 default:
546 ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
547 assert(false);
548 return OP_NOP;
549 }
550 }
551
552 operation
553 Converter::getOperation(nir_texop op)
554 {
555 switch (op) {
556 case nir_texop_tex:
557 return OP_TEX;
558 case nir_texop_lod:
559 return OP_TXLQ;
560 case nir_texop_txb:
561 return OP_TXB;
562 case nir_texop_txd:
563 return OP_TXD;
564 case nir_texop_txf:
565 case nir_texop_txf_ms:
566 return OP_TXF;
567 case nir_texop_tg4:
568 return OP_TXG;
569 case nir_texop_txl:
570 return OP_TXL;
571 case nir_texop_query_levels:
572 case nir_texop_texture_samples:
573 case nir_texop_txs:
574 return OP_TXQ;
575 default:
576 ERROR("couldn't get operation for nir_texop %u\n", op);
577 assert(false);
578 return OP_NOP;
579 }
580 }
581
582 operation
583 Converter::getOperation(nir_intrinsic_op op)
584 {
585 switch (op) {
586 case nir_intrinsic_emit_vertex:
587 return OP_EMIT;
588 case nir_intrinsic_end_primitive:
589 return OP_RESTART;
590 case nir_intrinsic_bindless_image_atomic:
591 case nir_intrinsic_image_atomic:
592 case nir_intrinsic_bindless_image_atomic_swap:
593 case nir_intrinsic_image_atomic_swap:
594 return OP_SUREDP;
595 case nir_intrinsic_bindless_image_load:
596 case nir_intrinsic_image_load:
597 return OP_SULDP;
598 case nir_intrinsic_bindless_image_samples:
599 case nir_intrinsic_image_samples:
600 case nir_intrinsic_bindless_image_size:
601 case nir_intrinsic_image_size:
602 return OP_SUQ;
603 case nir_intrinsic_bindless_image_store:
604 case nir_intrinsic_image_store:
605 return OP_SUSTP;
606 case nir_intrinsic_ddx:
607 case nir_intrinsic_ddx_coarse:
608 case nir_intrinsic_ddx_fine:
609 return OP_DFDX;
610 case nir_intrinsic_ddy:
611 case nir_intrinsic_ddy_coarse:
612 case nir_intrinsic_ddy_fine:
613 return OP_DFDY;
614 default:
615 ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
616 assert(false);
617 return OP_NOP;
618 }
619 }
620
621 operation
622 Converter::preOperationNeeded(nir_op op)
623 {
624 switch (op) {
625 case nir_op_fcos:
626 case nir_op_fsin:
627 return OP_PRESIN;
628 default:
629 return OP_NOP;
630 }
631 }
632
633 int
634 Converter::getSubOp(nir_op op)
635 {
636 switch (op) {
637 case nir_op_imul_high:
638 case nir_op_umul_high:
639 return NV50_IR_SUBOP_MUL_HIGH;
640 case nir_op_ishl:
641 case nir_op_ishr:
642 case nir_op_ushr:
643 return NV50_IR_SUBOP_SHIFT_WRAP;
644 default:
645 return 0;
646 }
647 }
648
649 int
650 Converter::getAtomicSubOp(nir_atomic_op op)
651 {
652 switch (op) {
653 case nir_atomic_op_fadd:
654 case nir_atomic_op_iadd:
655 return NV50_IR_SUBOP_ATOM_ADD;
656 case nir_atomic_op_iand:
657 return NV50_IR_SUBOP_ATOM_AND;
658 case nir_atomic_op_cmpxchg:
659 return NV50_IR_SUBOP_ATOM_CAS;
660 case nir_atomic_op_imax:
661 case nir_atomic_op_umax:
662 return NV50_IR_SUBOP_ATOM_MAX;
663 case nir_atomic_op_imin:
664 case nir_atomic_op_umin:
665 return NV50_IR_SUBOP_ATOM_MIN;
666 case nir_atomic_op_xchg:
667 return NV50_IR_SUBOP_ATOM_EXCH;
668 case nir_atomic_op_ior:
669 return NV50_IR_SUBOP_ATOM_OR;
670 case nir_atomic_op_ixor:
671 return NV50_IR_SUBOP_ATOM_XOR;
672 case nir_atomic_op_dec_wrap:
673 return NV50_IR_SUBOP_ATOM_DEC;
674 case nir_atomic_op_inc_wrap:
675 return NV50_IR_SUBOP_ATOM_INC;
676 default:
677 ERROR("couldn't get SubOp for atomic\n");
678 assert(false);
679 return 0;
680 }
681 }
682
683 int
684 Converter::getSubOp(nir_intrinsic_op op)
685 {
686 switch (op) {
687 case nir_intrinsic_vote_all:
688 return NV50_IR_SUBOP_VOTE_ALL;
689 case nir_intrinsic_vote_any:
690 return NV50_IR_SUBOP_VOTE_ANY;
691 case nir_intrinsic_vote_ieq:
692 return NV50_IR_SUBOP_VOTE_UNI;
693 default:
694 return 0;
695 }
696 }
697
698 CondCode
699 Converter::getCondCode(nir_op op)
700 {
701 switch (op) {
702 case nir_op_ieq8:
703 case nir_op_ieq16:
704 case nir_op_feq32:
705 case nir_op_ieq32:
706 return CC_EQ;
707 case nir_op_ige8:
708 case nir_op_uge8:
709 case nir_op_ige16:
710 case nir_op_uge16:
711 case nir_op_fge32:
712 case nir_op_ige32:
713 case nir_op_uge32:
714 return CC_GE;
715 case nir_op_ilt8:
716 case nir_op_ult8:
717 case nir_op_ilt16:
718 case nir_op_ult16:
719 case nir_op_flt32:
720 case nir_op_ilt32:
721 case nir_op_ult32:
722 return CC_LT;
723 case nir_op_fneu32:
724 return CC_NEU;
725 case nir_op_ine8:
726 case nir_op_ine16:
727 case nir_op_ine32:
728 return CC_NE;
729 default:
730 ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
731 assert(false);
732 return CC_FL;
733 }
734 }
735
736 Converter::LValues&
737 Converter::convert(nir_def *def)
738 {
739 NirDefMap::iterator it = ssaDefs.find(def->index);
740 if (it != ssaDefs.end())
741 return it->second;
742
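// allocate at least one full 32-bit register per component; sub-dword values
// still occupy a whole register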
743 LValues newDef(def->num_components);
744 for (uint8_t i = 0; i < def->num_components; i++)
745 newDef[i] = getSSA(std::max(4, def->bit_size / 8));
746 return ssaDefs[def->index] = newDef;
747 }
748
749 Value*
750 Converter::getSrc(nir_alu_src *src, uint8_t component)
751 {
752 return getSrc(&src->src, src->swizzle[component]);
753 }
754
755 Value*
756 Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
757 {
758 return getSrc(src->ssa, idx);
759 }
760
761 Value*
762 Converter::getSrc(nir_def *src, uint8_t idx)
763 {
764 ImmediateMap::iterator iit = immediates.find(src->index);
765 if (iit != immediates.end())
766 return convert((*iit).second, idx);
767
768 NirDefMap::iterator it = ssaDefs.find(src->index);
769 if (it == ssaDefs.end()) {
770 ERROR("SSA value %u not found\n", src->index);
771 assert(false);
772 return NULL;
773 }
774 return it->second[idx];
775 }
776
777 uint32_t
778 Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
779 {
780 nir_const_value *offset = nir_src_as_const_value(*src);
781
782 if (offset) {
783 indirect = NULL;
784 return offset[0].u32;
785 }
786
787 indirect = getSrc(src, idx, true);
788 return 0;
789 }
790
791 uint32_t
792 Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
793 {
794 int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
795
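// with vec4 addressing the indirect index selects a 16-byte slot, so scale it
// to a byte offset (shift left by 4)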
796 if (indirect && !isScalar)
797 indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
798 return idx;
799 }
800
801 static void
802 vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
803 {
804 assert(name && index);
805
806 if (slot >= VERT_ATTRIB_MAX) {
807 ERROR("invalid varying slot %u\n", slot);
808 assert(false);
809 return;
810 }
811
812 if (slot >= VERT_ATTRIB_GENERIC0 &&
813 slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
814 *name = TGSI_SEMANTIC_GENERIC;
815 *index = slot - VERT_ATTRIB_GENERIC0;
816 return;
817 }
818
819 if (slot >= VERT_ATTRIB_TEX0 &&
820 slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
821 *name = TGSI_SEMANTIC_TEXCOORD;
822 *index = slot - VERT_ATTRIB_TEX0;
823 return;
824 }
825
826 switch (slot) {
827 case VERT_ATTRIB_COLOR0:
828 *name = TGSI_SEMANTIC_COLOR;
829 *index = 0;
830 break;
831 case VERT_ATTRIB_COLOR1:
832 *name = TGSI_SEMANTIC_COLOR;
833 *index = 1;
834 break;
835 case VERT_ATTRIB_EDGEFLAG:
836 *name = TGSI_SEMANTIC_EDGEFLAG;
837 *index = 0;
838 break;
839 case VERT_ATTRIB_FOG:
840 *name = TGSI_SEMANTIC_FOG;
841 *index = 0;
842 break;
843 case VERT_ATTRIB_NORMAL:
844 *name = TGSI_SEMANTIC_NORMAL;
845 *index = 0;
846 break;
847 case VERT_ATTRIB_POS:
848 *name = TGSI_SEMANTIC_POSITION;
849 *index = 0;
850 break;
851 case VERT_ATTRIB_POINT_SIZE:
852 *name = TGSI_SEMANTIC_PSIZE;
853 *index = 0;
854 break;
855 default:
856 ERROR("unknown vert attrib slot %u\n", slot);
857 assert(false);
858 break;
859 }
860 }
861
862 uint8_t
863 Converter::translateInterpMode(const struct nv50_ir_varying *var, operation& op)
864 {
865 uint8_t mode = NV50_IR_INTERP_PERSPECTIVE;
866
867 if (var->flat)
868 mode = NV50_IR_INTERP_FLAT;
869 else
870 if (var->linear)
871 mode = NV50_IR_INTERP_LINEAR;
872 else
873 if (var->sc)
874 mode = NV50_IR_INTERP_SC;
875
876 op = (mode == NV50_IR_INTERP_PERSPECTIVE || mode == NV50_IR_INTERP_SC)
877 ? OP_PINTERP : OP_LINTERP;
878
879 if (var->centroid)
880 mode |= NV50_IR_INTERP_CENTROID;
881
882 return mode;
883 }
884
885 void
886 Converter::setInterpolate(nv50_ir_varying *var,
887 uint8_t mode,
888 bool centroid,
889 unsigned semantic)
890 {
891 switch (mode) {
892 case INTERP_MODE_FLAT:
893 var->flat = 1;
894 break;
895 case INTERP_MODE_NONE:
896 if (semantic == TGSI_SEMANTIC_COLOR)
897 var->sc = 1;
898 else if (semantic == TGSI_SEMANTIC_POSITION)
899 var->linear = 1;
900 break;
901 case INTERP_MODE_NOPERSPECTIVE:
902 var->linear = 1;
903 break;
904 case INTERP_MODE_SMOOTH:
905 break;
906 }
907 var->centroid = centroid;
908 }
909
910 static uint16_t
911 calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
912 bool input, const nir_variable *var)
913 {
914 if (!glsl_type_is_array(type))
915 return glsl_count_attribute_slots(type, false);
916
917 uint16_t slots;
918 switch (stage) {
919 case Program::TYPE_GEOMETRY:
920 slots = glsl_count_attribute_slots(type, false);
921 if (input)
922 slots /= info.gs.vertices_in;
923 break;
924 case Program::TYPE_TESSELLATION_CONTROL:
925 case Program::TYPE_TESSELLATION_EVAL:
926 // remove first dimension
927 if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
928 slots = glsl_count_attribute_slots(type, false);
929 else
930 slots = glsl_count_attribute_slots(type->fields.array, false);
931 break;
932 default:
933 slots = glsl_count_attribute_slots(type, false);
934 break;
935 }
936
937 return slots;
938 }
939
940 static uint8_t
941 getMaskForType(const glsl_type *type, uint8_t slot) {
942 uint16_t comp = glsl_get_components(glsl_without_array(type));
943 comp = comp ? comp : 4;
944
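// 64-bit components occupy two 32-bit slots each; when the doubled count spills
// past one vec4, even slots take the first four components and odd slots the rest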
945 if (glsl_base_type_is_64bit(glsl_without_array(type)->base_type)) {
946 comp *= 2;
947 if (comp > 4) {
948 if (slot % 2)
949 comp -= 4;
950 else
951 comp = 4;
952 }
953 }
954
955 return (1 << comp) - 1;
956 }
957
958 bool Converter::assignSlots() {
959 unsigned name;
960 unsigned index;
961
962 info->io.viewportId = -1;
963 info->io.mul_zero_wins = nir->info.use_legacy_math_rules;
964 info_out->numInputs = 0;
965 info_out->numOutputs = 0;
966 info_out->numSysVals = 0;
967
968 uint8_t i;
969 BITSET_FOREACH_SET(i, nir->info.system_values_read, SYSTEM_VALUE_MAX) {
970 switch (i) {
971 case SYSTEM_VALUE_VERTEX_ID:
972 info_out->io.vertexId = info_out->numSysVals;
973 break;
974 case SYSTEM_VALUE_INSTANCE_ID:
975 info_out->io.instanceId = info_out->numSysVals;
976 break;
977 default:
978 break;
979 }
980
981 info_out->sv[info_out->numSysVals].sn = (gl_system_value)i;
982 info_out->numSysVals += 1;
983 }
984
985 if (prog->getType() == Program::TYPE_COMPUTE)
986 return true;
987
988 nir_foreach_shader_in_variable(var, nir) {
989 const glsl_type *type = var->type;
990 int slot = var->data.location;
991 uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
992 uint32_t vary = var->data.driver_location;
993 assert(vary + slots <= NV50_CODEGEN_MAX_VARYINGS);
994
995 switch(prog->getType()) {
996 case Program::TYPE_FRAGMENT:
997 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
998 &name, &index);
999 for (uint16_t i = 0; i < slots; ++i) {
1000 setInterpolate(&info_out->in[vary + i], var->data.interpolation,
1001 var->data.centroid | var->data.sample, name);
1002 }
1003 break;
1004 case Program::TYPE_GEOMETRY:
1005 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1006 &name, &index);
1007 break;
1008 case Program::TYPE_TESSELLATION_CONTROL:
1009 case Program::TYPE_TESSELLATION_EVAL:
1010 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1011 &name, &index);
1012 if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
1013 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
1014 break;
1015 case Program::TYPE_VERTEX:
1016 if (slot >= VERT_ATTRIB_GENERIC0 && slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX)
1017 slot = VERT_ATTRIB_GENERIC0 + vary;
1018 vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
1019 switch (name) {
1020 case TGSI_SEMANTIC_EDGEFLAG:
1021 info_out->io.edgeFlagIn = vary;
1022 break;
1023 default:
1024 break;
1025 }
1026 break;
1027 default:
1028 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1029 return false;
1030 }
1031
1032 if (var->data.compact) {
1033 assert(!(nir->info.outputs_read & 1ull << slot));
1034 if (nir_is_arrayed_io(var, nir->info.stage)) {
1035 assert(glsl_type_is_array(type->fields.array));
1036 assert(glsl_type_is_scalar(type->fields.array->fields.array));
1037 assert(slots == glsl_get_length(type->fields.array));
1038 } else {
1039 assert(glsl_type_is_array(type));
1040 assert(glsl_type_is_scalar(type->fields.array));
1041 assert(slots == glsl_get_length(type));
1042 }
1043 assert(!glsl_base_type_is_64bit(glsl_without_array(type)->base_type));
1044
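// compact arrays (e.g. gl_ClipDistance) pack one scalar per component; build a
// component mask and spread it over at most two consecutive vec4 varyings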
1045 uint32_t comps = BITFIELD_RANGE(var->data.location_frac, slots);
1046 assert(!(comps & ~0xff));
1047
1048 if (comps & 0x0f) {
1049 nv50_ir_varying *v = &info_out->in[vary];
1050 v->patch = var->data.patch;
1051 v->sn = name;
1052 v->si = index;
1053 v->mask |= comps & 0x0f;
1054 info_out->numInputs =
1055 std::max<uint8_t>(info_out->numInputs, vary + 1);
1056 }
1057 if (comps & 0xf0) {
1058 nv50_ir_varying *v = &info_out->in[vary + 1];
1059 v->patch = var->data.patch;
1060 v->sn = name;
1061 v->si = index + 1;
1062 v->mask |= (comps & 0xf0) >> 4;
1063 info_out->numInputs =
1064 std::max<uint8_t>(info_out->numInputs, vary + 2);
1065 }
1066 } else {
1067 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1068 nv50_ir_varying *v = &info_out->in[vary];
1069
1070 v->patch = var->data.patch;
1071 v->sn = name;
1072 v->si = index + i;
1073 v->mask |= getMaskForType(type, i) << var->data.location_frac;
1074 }
1075 info_out->numInputs = std::max<uint8_t>(info_out->numInputs, vary);
1076 }
1077 }
1078
1079 nir_foreach_shader_out_variable(var, nir) {
1080 const glsl_type *type = var->type;
1081 int slot = var->data.location;
1082 uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
1083 uint32_t vary = var->data.driver_location;
1084
1085 assert(vary < NV50_CODEGEN_MAX_VARYINGS);
1086
1087 switch(prog->getType()) {
1088 case Program::TYPE_FRAGMENT:
1089 tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
1090 switch (name) {
1091 case TGSI_SEMANTIC_COLOR:
1092 if (!var->data.fb_fetch_output)
1093 info_out->prop.fp.numColourResults++;
1094 if (var->data.location == FRAG_RESULT_COLOR &&
1095 nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
1096 info_out->prop.fp.separateFragData = true;
1097 // sometimes we get FRAG_RESULT_DATAX with data.index 0
1098 // sometimes we get FRAG_RESULT_DATA0 with data.index X
1099 index = index == 0 ? var->data.index : index;
1100 break;
1101 case TGSI_SEMANTIC_POSITION:
1102 info_out->io.fragDepth = vary;
1103 info_out->prop.fp.writesDepth = true;
1104 break;
1105 case TGSI_SEMANTIC_SAMPLEMASK:
1106 info_out->io.sampleMask = vary;
1107 break;
1108 default:
1109 break;
1110 }
1111 break;
1112 case Program::TYPE_GEOMETRY:
1113 case Program::TYPE_TESSELLATION_CONTROL:
1114 case Program::TYPE_TESSELLATION_EVAL:
1115 case Program::TYPE_VERTEX:
1116 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1117 &name, &index);
1118
1119 if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
1120 name != TGSI_SEMANTIC_TESSOUTER)
1121 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
1122
1123 switch (name) {
1124 case TGSI_SEMANTIC_EDGEFLAG:
1125 info_out->io.edgeFlagOut = vary;
1126 break;
1127 default:
1128 break;
1129 }
1130 break;
1131 default:
1132 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1133 return false;
1134 }
1135
1136 if (var->data.compact) {
1137 assert(!(nir->info.outputs_read & 1ull << slot));
1138 if (nir_is_arrayed_io(var, nir->info.stage)) {
1139 assert(glsl_type_is_array(type->fields.array));
1140 assert(glsl_type_is_scalar(type->fields.array->fields.array));
1141 assert(slots == glsl_get_length(type->fields.array));
1142 } else {
1143 assert(glsl_type_is_array(type));
1144 assert(glsl_type_is_scalar(type->fields.array));
1145 assert(slots == glsl_get_length(type));
1146 }
1147 assert(!glsl_base_type_is_64bit(glsl_without_array(type)->base_type));
1148
1149 uint32_t comps = BITFIELD_RANGE(var->data.location_frac, slots);
1150 assert(!(comps & ~0xff));
1151
1152 if (comps & 0x0f) {
1153 nv50_ir_varying *v = &info_out->out[vary];
1154 v->patch = var->data.patch;
1155 v->sn = name;
1156 v->si = index;
1157 v->mask |= comps & 0x0f;
1158 info_out->numOutputs =
1159 std::max<uint8_t>(info_out->numOutputs, vary + 1);
1160 }
1161 if (comps & 0xf0) {
1162 nv50_ir_varying *v = &info_out->out[vary + 1];
1163 v->patch = var->data.patch;
1164 v->sn = name;
1165 v->si = index + 1;
1166 v->mask |= (comps & 0xf0) >> 4;
1167 info_out->numOutputs =
1168 std::max<uint8_t>(info_out->numOutputs, vary + 2);
1169 }
1170 } else {
1171 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1172 nv50_ir_varying *v = &info_out->out[vary];
1173 v->patch = var->data.patch;
1174 v->sn = name;
1175 v->si = index + i;
1176 v->mask |= getMaskForType(type, i) << var->data.location_frac;
1177
1178 if (nir->info.outputs_read & 1ull << slot)
1179 v->oread = 1;
1180 }
1181 info_out->numOutputs = std::max<uint8_t>(info_out->numOutputs, vary);
1182 }
1183 }
1184
1185 return info->assignSlots(info_out) == 0;
1186 }
1187
1188 uint32_t
1189 Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
1190 {
1191 DataType ty;
1192 int offset = nir_intrinsic_component(insn);
1193 bool input;
1194
1195 if (nir_intrinsic_infos[insn->intrinsic].has_dest)
1196 ty = getDType(insn);
1197 else
1198 ty = getSType(insn->src[0], false, false);
1199
1200 switch (insn->intrinsic) {
1201 case nir_intrinsic_load_input:
1202 case nir_intrinsic_load_interpolated_input:
1203 case nir_intrinsic_load_per_vertex_input:
1204 input = true;
1205 break;
1206 case nir_intrinsic_load_output:
1207 case nir_intrinsic_load_per_vertex_output:
1208 case nir_intrinsic_store_output:
1209 case nir_intrinsic_store_per_vertex_output:
1210 input = false;
1211 break;
1212 default:
1213 ERROR("unknown intrinsic in getSlotAddress %s",
1214 nir_intrinsic_infos[insn->intrinsic].name);
1215 input = false;
1216 assert(false);
1217 break;
1218 }
1219
1220 if (typeSizeof(ty) == 8) {
1221 slot *= 2;
1222 slot += offset;
1223 if (slot >= 4) {
1224 idx += 1;
1225 slot -= 4;
1226 }
1227 } else {
1228 slot += offset;
1229 }
1230
1231 assert(slot < 4);
1232 assert(!input || idx < NV50_CODEGEN_MAX_VARYINGS);
1233 assert(input || idx < NV50_CODEGEN_MAX_VARYINGS);
1234
1235 const nv50_ir_varying *vary = input ? info_out->in : info_out->out;
1236 return vary[idx].slot[slot] * 4;
1237 }
1238
1239 Instruction *
1240 Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
1241 uint32_t base, uint8_t c, Value *indirect0,
1242 Value *indirect1, bool patch, CacheMode cache)
1243 {
1244 unsigned int tySize = typeSizeof(ty);
1245
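// 64-bit loads that are indirect, or that the target cannot do in a single
// access, are split into two 32-bit loads and merged afterwards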
1246 if (tySize == 8 &&
1247 (indirect0 || !prog->getTarget()->isAccessSupported(file, TYPE_U64))) {
1248 Value *lo = getSSA();
1249 Value *hi = getSSA();
1250
1251 Instruction *loi =
1252 mkLoad(TYPE_U32, lo,
1253 mkSymbol(file, i, TYPE_U32, base + c * tySize),
1254 indirect0);
1255 loi->setIndirect(0, 1, indirect1);
1256 loi->perPatch = patch;
1257 loi->cache = cache;
1258
1259 Instruction *hii =
1260 mkLoad(TYPE_U32, hi,
1261 mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
1262 indirect0);
1263 hii->setIndirect(0, 1, indirect1);
1264 hii->perPatch = patch;
1265 hii->cache = cache;
1266
1267 return mkOp2(OP_MERGE, ty, def, lo, hi);
1268 } else {
1269 Instruction *ld =
1270 mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
1271 ld->setIndirect(0, 1, indirect1);
1272 ld->perPatch = patch;
1273 ld->cache = cache;
1274 return ld;
1275 }
1276 }
1277
1278 Instruction *
1279 Converter::loadVector(nir_intrinsic_instr *insn,
1280 uint8_t buffer, Value *indirectBuffer,
1281 uint32_t offset, Value *indirectOffset)
1282 {
1283 uint32_t load_bytes = insn->def.bit_size / 8 * insn->def.num_components;
1284 DataType ty = typeOfSize(load_bytes, false, false);
1285 DataFile file = getFile(insn->intrinsic);
1286
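// emit one wide load covering all components and split the result back into
// the per-component SSA values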
1287 LValues &newDefs = convert(&insn->def);
1288 Value* def;
1289 if (insn->def.num_components == 1) {
1290 def = newDefs[0];
1291 } else {
1292 def = getSSA(load_bytes);
1293 }
1294
1295 Instruction *ld = mkLoad(ty, def, mkSymbol(file, buffer, ty, offset), indirectOffset);
1296 ld->setIndirect(0, 1, indirectBuffer);
1297
1298 if (insn->def.num_components != 1) {
1299 Instruction *split = mkOp1(OP_SPLIT, ty, newDefs[0], def);
1300 for (int i = 1; i < insn->def.num_components; i++) {
1301 split->setDef(i, newDefs[i]);
1302 }
1303 }
1304
1305 return ld;
1306 }
1307
1308 void
1309 Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
1310 DataType ty, Value *src, uint8_t idx, uint8_t c,
1311 Value *indirect0, Value *indirect1)
1312 {
1313 uint8_t size = typeSizeof(ty);
1314 uint32_t address = getSlotAddress(insn, idx, c);
1315
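// split 64-bit indirect stores into two 32-bit stores at address and address + 4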
1316 if (size == 8 && indirect0) {
1317 Value *split[2];
1318 mkSplit(split, 4, src);
1319
1320 if (op == OP_EXPORT) {
1321 split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
1322 split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
1323 }
1324
1325 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
1326 split[0])->perPatch = info_out->out[idx].patch;
1327 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
1328 split[1])->perPatch = info_out->out[idx].patch;
1329 } else {
1330 if (op == OP_EXPORT)
1331 src = mkMov(getSSA(size), src, ty)->getDef(0);
1332 mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
1333 src)->perPatch = info_out->out[idx].patch;
1334 }
1335 }
1336
1337 Instruction *
1338 Converter::storeVector(nir_intrinsic_instr *insn,
1339 uint8_t buffer, Value *indirectBuffer,
1340 uint32_t offset, Value *indirectOffset)
1341 {
1342 const uint8_t num_components = insn->src[0].ssa->num_components;
1343 uint32_t bytes = insn->src[0].ssa->bit_size / 8 * num_components;
1344 DataType ty = typeOfSize(bytes, false, false);
1345 DataFile file = getFile(insn->intrinsic);
1346 assert(nir_intrinsic_write_mask(insn) == nir_component_mask(num_components));
1347
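// mirror of loadVector(): merge the per-component sources into one wide value
// and emit a single store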
1348 Value* src;
1349 if (num_components == 1) {
1350 src = getSrc(&insn->src[0], 0);
1351 } else {
1352 src = getSSA(bytes);
1353
1354 Instruction *merge = mkOp(OP_MERGE, ty, src);
1355 for (int i = 0; i < num_components; i++) {
1356 merge->setSrc(i, getSrc(&insn->src[0], i));
1357 }
1358 }
1359
1360 Instruction *st = mkStore(OP_STORE, ty, mkSymbol(file, buffer, ty, offset),
1361 indirectOffset, src);
1362 st->setIndirect(0, 1, indirectBuffer);
1363
1364 return st;
1365 }
1366
1367 bool
1368 Converter::memVectorizeCb(unsigned align_mul,
1369 unsigned align_offset,
1370 unsigned bit_size,
1371 unsigned num_components,
1372 nir_intrinsic_instr *low,
1373 nir_intrinsic_instr *high,
1374 void *cb_data)
1375 {
1376 /*
1377 * Since we legalize these later with nir_lower_mem_access_bit_sizes,
1378 * we can optimistically combine anything that might be profitable
1379 */
1380 const Converter* converter = (Converter*) cb_data;
1381 const Target* target = converter->prog->getTarget();
1382
1383 assert(util_is_power_of_two_nonzero(align_mul));
1384 align_mul = std::min(align_mul, 128u / 8u);
1385
1386 const DataFile file = converter->getFile(low->intrinsic);
1387 if (align_mul == 128u / 8u && !target->isAccessSupported(file, TYPE_B128))
1388 align_mul = 64u / 8u;
1389
1390 if (align_mul == 64u / 8u && !target->isAccessSupported(file, TYPE_U64))
1391 align_mul = 32u / 8u;
1392
1393 align_offset = align_offset % align_mul;
1394
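// only vectorize if the combined access still fits within one naturally aligned
// block of align_mul bytes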
1395 return align_offset + num_components * (bit_size / 8) <= align_mul;
1396 }
1397
1398 nir_mem_access_size_align
1399 Converter::getMemAccessSizeAlign(nir_intrinsic_op intrin,
1400 uint8_t original_bytes,
1401 uint8_t original_bit_size,
1402 uint32_t align_mul,
1403 uint32_t align_offset,
1404 bool offset_is_const,
1405 const void *cb_data)
1406 {
1407 const Converter* converter = (Converter*) cb_data;
1408 const Target* target = converter->prog->getTarget();
1409
1410 const uint32_t align = nir_combined_align(align_mul, align_offset);
1411
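// round the requested size down to a power of two, clamp it by the known
// alignment and by the widest access the target supports, then widen the
// component bit size to 32 where the chosen byte size allows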
1412 assert(original_bytes != 0);
1413 uint32_t bytes = 1 << (util_last_bit(original_bytes) - 1);
1414
1415 bytes = std::min(bytes, align);
1416 bytes = std::min(bytes, 128u / 8u);
1417
1418 assert(util_is_power_of_two_nonzero(bytes));
1419
1420 const DataFile file = converter->getFile(intrin);
1421 if (bytes == 128u / 8u && !target->isAccessSupported(file, TYPE_B128))
1422 bytes = 64u / 8u;
1423
1424 if (bytes == 64u / 8u && !target->isAccessSupported(file, TYPE_U64))
1425 bytes = 32u / 8u;
1426
1427 uint32_t bit_size = original_bit_size;
1428 bit_size = std::max(bit_size, 32u);
1429 bit_size = std::min(bit_size, bytes * 8u);
1430
1431 return {
1432 .num_components = (uint8_t) (bytes / (bit_size / 8)),
1433 .bit_size = (uint8_t) bit_size,
1434 .align = (uint16_t) bytes,
1435 };
1436 }
1437
1438 bool
1439 Converter::parseNIR()
1440 {
1441 info_out->bin.tlsSpace = nir->scratch_size;
1442 info_out->io.clipDistances = nir->info.clip_distance_array_size;
1443 info_out->io.cullDistances = nir->info.cull_distance_array_size;
1444 info_out->io.layer_viewport_relative = nir->info.layer_viewport_relative;
1445
1446 switch(prog->getType()) {
1447 case Program::TYPE_COMPUTE:
1448 info->prop.cp.numThreads[0] = nir->info.workgroup_size[0];
1449 info->prop.cp.numThreads[1] = nir->info.workgroup_size[1];
1450 info->prop.cp.numThreads[2] = nir->info.workgroup_size[2];
1451 info_out->bin.smemSize = std::max(info_out->bin.smemSize, nir->info.shared_size);
1452
1453 if (info->target < NVISA_GF100_CHIPSET) {
1454 int gmemSlot = 0;
1455
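// on pre-GF100 chips every SSBO and image variable is bound to one of the
// (at most 16) global memory slots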
1456 for (unsigned i = 0; i < nir->info.num_ssbos; i++) {
1457 info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 0, .slot = i};
1458 assert(gmemSlot < 16);
1459 }
1460 nir_foreach_image_variable(var, nir) {
1461 int image_count = glsl_type_get_image_count(var->type);
1462 for (int i = 0; i < image_count; i++) {
1463 info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 1, .slot = var->data.binding + i};
1464 assert(gmemSlot < 16);
1465 }
1466 }
1467 }
1468
1469 break;
1470 case Program::TYPE_FRAGMENT:
1471 info_out->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
1472 prog->persampleInvocation =
1473 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
1474 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
1475 info_out->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
1476 info_out->prop.fp.readsSampleLocations =
1477 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
1478 info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard;
1479 info_out->prop.fp.usesSampleMaskIn =
1480 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
1481 break;
1482 case Program::TYPE_GEOMETRY:
1483 info_out->prop.gp.instanceCount = nir->info.gs.invocations;
1484 info_out->prop.gp.maxVertices = nir->info.gs.vertices_out;
1485 info_out->prop.gp.outputPrim = nir->info.gs.output_primitive;
1486 break;
1487 case Program::TYPE_TESSELLATION_CONTROL:
1488 case Program::TYPE_TESSELLATION_EVAL:
1489 info_out->prop.tp.domain = u_tess_prim_from_shader(nir->info.tess._primitive_mode);
1490 info_out->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
1491 info_out->prop.tp.outputPrim =
1492 nir->info.tess.point_mode ? MESA_PRIM_POINTS : MESA_PRIM_TRIANGLES;
1493 info_out->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
1494 info_out->prop.tp.winding = !nir->info.tess.ccw;
1495 break;
1496 case Program::TYPE_VERTEX:
1497 info_out->prop.vp.usesDrawParameters =
1498 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_VERTEX) ||
1499 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
1500 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
1501 break;
1502 default:
1503 break;
1504 }
1505
1506 return true;
1507 }
1508
1509 bool
1510 Converter::visit(nir_function *function)
1511 {
1512 assert(function->impl);
1513
1514 // usually the blocks will set everything up, but main is special
1515 BasicBlock *entry = new BasicBlock(prog->main);
1516 exit = new BasicBlock(prog->main);
1517 blocks[nir_start_block(function->impl)->index] = entry;
1518 prog->main->setEntry(entry);
1519 prog->main->setExit(exit);
1520
1521 setPosition(entry, true);
1522
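// TCS: derive the base of this invocation's vertex output patch as
// lane id minus invocation id;
// FS: precompute 1/position.w for perspective-correct interpolation (OP_PINTERP)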
1523 switch (prog->getType()) {
1524 case Program::TYPE_TESSELLATION_CONTROL:
1525 outBase = mkOp2v(
1526 OP_SUB, TYPE_U32, getSSA(),
1527 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
1528 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
1529 break;
1530 case Program::TYPE_FRAGMENT: {
1531 Symbol *sv = mkSysVal(SV_POSITION, 3);
1532 Value *temp = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
1533 fp.position = mkOp1v(OP_RCP, TYPE_F32, temp, temp);
1534 break;
1535 }
1536 default:
1537 break;
1538 }
1539
1540 nir_index_ssa_defs(function->impl);
1541 foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
1542 if (!visit(node))
1543 return false;
1544 }
1545
1546 bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
1547 setPosition(exit, true);
1548
1549 // TODO: for non-main functions this needs to be an OP_RETURN
1550 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
1551 return true;
1552 }
1553
1554 bool
1555 Converter::visit(nir_cf_node *node)
1556 {
1557 switch (node->type) {
1558 case nir_cf_node_block:
1559 return visit(nir_cf_node_as_block(node));
1560 case nir_cf_node_if:
1561 return visit(nir_cf_node_as_if(node));
1562 case nir_cf_node_loop:
1563 return visit(nir_cf_node_as_loop(node));
1564 default:
1565 ERROR("unknown nir_cf_node type %u\n", node->type);
1566 return false;
1567 }
1568 }
1569
1570 bool
1571 Converter::visit(nir_block *block)
1572 {
1573 if (!block->predecessors->entries && block->instr_list.is_empty())
1574 return true;
1575
1576 BasicBlock *bb = convert(block);
1577
1578 setPosition(bb, true);
1579 nir_foreach_instr(insn, block) {
1580 if (!visit(insn))
1581 return false;
1582 }
1583 return true;
1584 }
1585
1586 bool
1587 Converter::visit(nir_if *nif)
1588 {
1589 curIfDepth++;
1590
1591 DataType sType = getSType(nif->condition, false, false);
1592 Value *src = getSrc(&nif->condition, 0);
1593
1594 nir_block *lastThen = nir_if_last_then_block(nif);
1595 nir_block *lastElse = nir_if_last_else_block(nif);
1596
1597 BasicBlock *headBB = bb;
1598 BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
1599 BasicBlock *elseBB = convert(nir_if_first_else_block(nif));
1600
1601 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
1602 bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
1603
1604 bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
1605 mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);
1606
1607 foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
1608 if (!visit(node))
1609 return false;
1610 }
1611
1612 setPosition(convert(lastThen), true);
1613 if (!bb->isTerminated()) {
1614 BasicBlock *tailBB = convert(lastThen->successors[0]);
1615 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1616 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1617 } else {
1618 insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
1619 }
1620
1621 foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
1622 if (!visit(node))
1623 return false;
1624 }
1625
1626 setPosition(convert(lastElse), true);
1627 if (!bb->isTerminated()) {
1628 BasicBlock *tailBB = convert(lastElse->successors[0]);
1629 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1630 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1631 } else {
1632 insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
1633 }
1634
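// give up on join insertion for deeply nested ifs, presumably to stay within
// the hardware's reconvergence stack limits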
1635 if (curIfDepth > 6) {
1636 insertJoins = false;
1637 }
1638
1639 /* we made sure that all threads would converge at the same block */
1640 if (insertJoins) {
1641 BasicBlock *conv = convert(lastThen->successors[0]);
1642 setPosition(headBB->getExit(), false);
1643 headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
1644 setPosition(conv, false);
1645 mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
1646 }
1647
1648 curIfDepth--;
1649
1650 return true;
1651 }
1652
1653 // TODO: add convergence
1654 bool
1655 Converter::visit(nir_loop *loop)
1656 {
1657 assert(!nir_loop_has_continue_construct(loop));
1658 curLoopDepth += 1;
1659 func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);
1660
1661 BasicBlock *loopBB = convert(nir_loop_first_block(loop));
1662 BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
1663
1664 bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);
1665
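// OP_PREBREAK marks where break exits converge (the block after the loop) and
// OP_PRECONT marks the continue target (the loop header)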
1666 mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
1667 setPosition(loopBB, false);
1668 mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);
1669
1670 foreach_list_typed(nir_cf_node, node, node, &loop->body) {
1671 if (!visit(node))
1672 return false;
1673 }
1674
1675 if (!bb->isTerminated()) {
1676 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
1677 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
1678 }
1679
1680 if (tailBB->cfg.incidentCount() == 0)
1681 loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
1682
1683 curLoopDepth -= 1;
1684
1685 info_out->loops++;
1686
1687 return true;
1688 }
1689
1690 bool
1691 Converter::visit(nir_instr *insn)
1692 {
1693 // we need an insertion point for immediate loads generated on the fly
1694 immInsertPos = bb->getExit();
1695 switch (insn->type) {
1696 case nir_instr_type_alu:
1697 return visit(nir_instr_as_alu(insn));
1698 case nir_instr_type_intrinsic:
1699 return visit(nir_instr_as_intrinsic(insn));
1700 case nir_instr_type_jump:
1701 return visit(nir_instr_as_jump(insn));
1702 case nir_instr_type_load_const:
1703 return visit(nir_instr_as_load_const(insn));
1704 case nir_instr_type_undef:
1705 return visit(nir_instr_as_undef(insn));
1706 case nir_instr_type_tex:
1707 return visit(nir_instr_as_tex(insn));
1708 default:
1709 ERROR("unknown nir_instr type %u\n", insn->type);
1710 return false;
1711 }
1712 return true;
1713 }
1714
1715 SVSemantic
1716 Converter::convert(nir_intrinsic_op intr)
1717 {
1718 switch (intr) {
1719 case nir_intrinsic_load_base_vertex:
1720 return SV_BASEVERTEX;
1721 case nir_intrinsic_load_base_instance:
1722 return SV_BASEINSTANCE;
1723 case nir_intrinsic_load_draw_id:
1724 return SV_DRAWID;
1725 case nir_intrinsic_load_front_face:
1726 return SV_FACE;
1727 case nir_intrinsic_is_helper_invocation:
1728 case nir_intrinsic_load_helper_invocation:
1729 return SV_THREAD_KILL;
1730 case nir_intrinsic_load_instance_id:
1731 return SV_INSTANCE_ID;
1732 case nir_intrinsic_load_invocation_id:
1733 return SV_INVOCATION_ID;
1734 case nir_intrinsic_load_workgroup_size:
1735 return SV_NTID;
1736 case nir_intrinsic_load_local_invocation_id:
1737 return SV_TID;
1738 case nir_intrinsic_load_num_workgroups:
1739 return SV_NCTAID;
1740 case nir_intrinsic_load_patch_vertices_in:
1741 return SV_VERTEX_COUNT;
1742 case nir_intrinsic_load_primitive_id:
1743 return SV_PRIMITIVE_ID;
1744 case nir_intrinsic_load_sample_id:
1745 return SV_SAMPLE_INDEX;
1746 case nir_intrinsic_load_sample_mask_in:
1747 return SV_SAMPLE_MASK;
1748 case nir_intrinsic_load_sample_pos:
1749 return SV_SAMPLE_POS;
1750 case nir_intrinsic_load_subgroup_eq_mask:
1751 return SV_LANEMASK_EQ;
1752 case nir_intrinsic_load_subgroup_ge_mask:
1753 return SV_LANEMASK_GE;
1754 case nir_intrinsic_load_subgroup_gt_mask:
1755 return SV_LANEMASK_GT;
1756 case nir_intrinsic_load_subgroup_le_mask:
1757 return SV_LANEMASK_LE;
1758 case nir_intrinsic_load_subgroup_lt_mask:
1759 return SV_LANEMASK_LT;
1760 case nir_intrinsic_load_subgroup_invocation:
1761 return SV_LANEID;
1762 case nir_intrinsic_load_tess_coord:
1763 return SV_TESS_COORD;
1764 case nir_intrinsic_load_tess_level_inner:
1765 return SV_TESS_INNER;
1766 case nir_intrinsic_load_tess_level_outer:
1767 return SV_TESS_OUTER;
1768 case nir_intrinsic_load_vertex_id:
1769 return SV_VERTEX_ID;
1770 case nir_intrinsic_load_workgroup_id:
1771 return SV_CTAID;
1772 case nir_intrinsic_load_work_dim:
1773 return SV_WORK_DIM;
1774 default:
1775 ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
1776 nir_intrinsic_infos[intr].name);
1777 assert(false);
1778 return SV_LAST;
1779 }
1780 }
1781
1782 bool
1783 Converter::visit(nir_intrinsic_instr *insn)
1784 {
1785 nir_intrinsic_op op = insn->intrinsic;
1786 const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
1787 unsigned dest_components = nir_intrinsic_dest_components(insn);
1788
1789 switch (op) {
1790 case nir_intrinsic_decl_reg: {
1791 const unsigned reg_index = insn->def.index;
1792 const unsigned bit_size = nir_intrinsic_bit_size(insn);
1793 const unsigned num_components = nir_intrinsic_num_components(insn);
1794 assert(nir_intrinsic_num_array_elems(insn) == 0);
1795
1796 LValues newDef(num_components);
1797 for (uint8_t c = 0; c < num_components; c++)
1798 newDef[c] = getScratch(std::max(4u, bit_size / 8));
1799
1800 assert(regDefs.find(reg_index) == regDefs.end());
1801 regDefs[reg_index] = newDef;
1802 break;
1803 }
1804
1805 case nir_intrinsic_load_reg: {
1806 const unsigned reg_index = insn->src[0].ssa->index;
1807 NirDefMap::iterator it = regDefs.find(reg_index);
1808 assert(it != regDefs.end());
1809 LValues &src = it->second;
1810
1811 DataType dType = getDType(insn);
1812 LValues &newDefs = convert(&insn->def);
1813 for (uint8_t c = 0; c < insn->num_components; c++)
1814 mkMov(newDefs[c], src[c], dType);
1815 break;
1816 }
1817
1818 case nir_intrinsic_store_reg: {
1819 const unsigned reg_index = insn->src[1].ssa->index;
1820 NirDefMap::iterator it = regDefs.find(reg_index);
1821 assert(it != regDefs.end());
1822 LValues &dst = it->second;
1823
1824 DataType dType = Converter::getSType(insn->src[0], false, false);
1825
1826 const nir_component_mask_t write_mask = nir_intrinsic_write_mask(insn);
1827 for (uint8_t c = 0u; c < insn->num_components; c++) {
1828 if (!((1u << c) & write_mask))
1829 continue;
1830
1831 Value *src = getSrc(&insn->src[0], c);
1832 mkMov(dst[c], src, dType);
1833 }
1834 break;
1835 }
1836
1837 case nir_intrinsic_store_output:
1838 case nir_intrinsic_store_per_vertex_output: {
1839 Value *indirect;
1840 DataType dType = getSType(insn->src[0], false, false);
1841 uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
1842
1843 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
1844 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
1845 continue;
1846
1847 uint8_t offset = 0;
1848 Value *src = getSrc(&insn->src[0], i);
1849 switch (prog->getType()) {
1850 case Program::TYPE_FRAGMENT: {
1851 if (info_out->out[idx].sn == TGSI_SEMANTIC_POSITION) {
1852             // TGSI uses a different interface than NIR: TGSI stores that
1853             // value in the z component, NIR in x.
1854 offset += 2;
1855 src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
1856 }
1857 break;
1858 }
1859 default:
1860 break;
1861 }
1862
1863 storeTo(insn, getFile(op), OP_EXPORT, dType, src, idx, i + offset, indirect);
1864 }
1865 break;
1866 }
1867 case nir_intrinsic_load_input:
1868 case nir_intrinsic_load_interpolated_input:
1869 case nir_intrinsic_load_output: {
1870 LValues &newDefs = convert(&insn->def);
1871
1872 // FBFetch
1873 if (prog->getType() == Program::TYPE_FRAGMENT &&
1874 op == nir_intrinsic_load_output) {
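         // Read the current render target value back with a TXF from a 2D MS
         // array view, addressed by pixel position, layer and sample index.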
1875 std::vector<Value*> defs, srcs;
1876 uint8_t mask = 0;
1877
1878 srcs.push_back(getSSA());
1879 srcs.push_back(getSSA());
1880 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
1881 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
1882 mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
1883 mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
1884
1885 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
1886 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
1887
1888 for (uint8_t i = 0u; i < dest_components; ++i) {
1889 defs.push_back(newDefs[i]);
1890 mask |= 1 << i;
1891 }
1892
1893 TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
1894 texi->tex.levelZero = true;
1895 texi->tex.mask = mask;
1896 texi->tex.useOffsets = 0;
1897 texi->tex.r = 0xffff;
1898 texi->tex.s = 0xffff;
1899
1900 info_out->prop.fp.readsFramebuffer = true;
1901 break;
1902 }
1903
1904 const DataType dType = getDType(insn);
1905 Value *indirect;
1906 bool input = op != nir_intrinsic_load_output;
1907 operation nvirOp = OP_LAST;
1908 uint32_t mode = 0;
1909
1910 uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
1911 nv50_ir_varying& vary = input ? info_out->in[idx] : info_out->out[idx];
1912
1913       // The interpolation mode is stored as an immediate in component 1 of
1914       // the barycentric source; see the load_barycentric_* handling below.
1914 if (prog->getType() == Program::TYPE_FRAGMENT) {
1915 if (op == nir_intrinsic_load_interpolated_input) {
1916 ImmediateValue immMode;
1917 if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
1918 mode = immMode.reg.data.u32;
1919 }
1920 if (mode == NV50_IR_INTERP_DEFAULT)
1921 mode |= translateInterpMode(&vary, nvirOp);
1922 else {
1923 if (vary.linear) {
1924 nvirOp = OP_LINTERP;
1925 mode |= NV50_IR_INTERP_LINEAR;
1926 } else {
1927 nvirOp = OP_PINTERP;
1928 mode |= NV50_IR_INTERP_PERSPECTIVE;
1929 }
1930 }
1931 }
1932
1933 for (uint8_t i = 0u; i < dest_components; ++i) {
1934 uint32_t address = getSlotAddress(insn, idx, i);
1935 Symbol *sym = mkSymbol(getFile(op), 0, dType, address);
1936 if (prog->getType() == Program::TYPE_FRAGMENT) {
1937 int s = 1;
1938 if (typeSizeof(dType) == 8) {
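               // 64-bit inputs are interpolated as two 32-bit halves which are
               // merged into the full value afterwards.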
1939 Value *lo = getSSA();
1940 Value *hi = getSSA();
1941 Instruction *interp;
1942
1943 interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
1944 if (nvirOp == OP_PINTERP)
1945 interp->setSrc(s++, fp.position);
1946 if (mode & NV50_IR_INTERP_OFFSET)
1947 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1948 interp->setInterpolate(mode);
1949 interp->setIndirect(0, 0, indirect);
1950
1951 Symbol *sym1 = mkSymbol(getFile(op), 0, dType, address + 4);
1952 interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
1953 if (nvirOp == OP_PINTERP)
1954 interp->setSrc(s++, fp.position);
1955 if (mode & NV50_IR_INTERP_OFFSET)
1956 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1957 interp->setInterpolate(mode);
1958 interp->setIndirect(0, 0, indirect);
1959
1960 mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
1961 } else {
1962 Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
1963 if (nvirOp == OP_PINTERP)
1964 interp->setSrc(s++, fp.position);
1965 if (mode & NV50_IR_INTERP_OFFSET)
1966 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1967 interp->setInterpolate(mode);
1968 interp->setIndirect(0, 0, indirect);
1969 }
1970 } else {
1971 mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
1972 }
1973 }
1974 break;
1975 }
1976 case nir_intrinsic_load_barycentric_at_offset:
1977 case nir_intrinsic_load_barycentric_at_sample:
1978 case nir_intrinsic_load_barycentric_centroid:
1979 case nir_intrinsic_load_barycentric_pixel:
1980 case nir_intrinsic_load_barycentric_sample: {
1981 LValues &newDefs = convert(&insn->def);
1982 uint32_t mode;
1983
1984 if (op == nir_intrinsic_load_barycentric_centroid ||
1985 op == nir_intrinsic_load_barycentric_sample) {
1986 mode = NV50_IR_INTERP_CENTROID;
1987 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
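         // Clamp the offset to the supported range [-0.5, 0.4375], scale by
         // 4096 to a signed fixed-point value with 12 fractional bits, and
         // pack x into the low and y into the high 16 bits.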
1988 Value *offs[2];
1989 for (uint8_t c = 0; c < 2; c++) {
1990 offs[c] = getScratch();
1991 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1992 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1993 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1994 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1995 }
1996 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1997
1998 mode = NV50_IR_INTERP_OFFSET;
1999 } else if (op == nir_intrinsic_load_barycentric_pixel) {
2000 mode = NV50_IR_INTERP_DEFAULT;
2001 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
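         // PIXLD with the OFFSET subop converts the sample index into an
         // interpolation offset suitable for the OFFSET interpolation mode.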
2002 info_out->prop.fp.readsSampleLocations = true;
2003 Value *sample = getSSA();
2004 mkOp3(OP_SELP, TYPE_U32, sample, mkImm(0), getSrc(&insn->src[0], 0), mkImm(0))
2005 ->subOp = 2;
2006 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], sample)->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
2007 mode = NV50_IR_INTERP_OFFSET;
2008 } else {
2009 unreachable("all intrinsics already handled above");
2010 }
2011
2012 loadImm(newDefs[1], mode);
2013 break;
2014 }
2015 case nir_intrinsic_demote:
2016 mkOp(OP_DISCARD, TYPE_NONE, NULL);
2017 break;
2018 case nir_intrinsic_demote_if: {
2019 Value *pred = getSSA(1, FILE_PREDICATE);
2020 if (insn->num_components > 1) {
2021 ERROR("nir_intrinsic_demote_if only with 1 component supported!\n");
2022 assert(false);
2023 return false;
2024 }
2025 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2026 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
2027 break;
2028 }
2029 case nir_intrinsic_load_base_vertex:
2030 case nir_intrinsic_load_base_instance:
2031 case nir_intrinsic_load_draw_id:
2032 case nir_intrinsic_load_front_face:
2033 case nir_intrinsic_is_helper_invocation:
2034 case nir_intrinsic_load_helper_invocation:
2035 case nir_intrinsic_load_instance_id:
2036 case nir_intrinsic_load_invocation_id:
2037 case nir_intrinsic_load_workgroup_size:
2038 case nir_intrinsic_load_local_invocation_id:
2039 case nir_intrinsic_load_num_workgroups:
2040 case nir_intrinsic_load_patch_vertices_in:
2041 case nir_intrinsic_load_primitive_id:
2042 case nir_intrinsic_load_sample_id:
2043 case nir_intrinsic_load_sample_mask_in:
2044 case nir_intrinsic_load_sample_pos:
2045 case nir_intrinsic_load_subgroup_eq_mask:
2046 case nir_intrinsic_load_subgroup_ge_mask:
2047 case nir_intrinsic_load_subgroup_gt_mask:
2048 case nir_intrinsic_load_subgroup_le_mask:
2049 case nir_intrinsic_load_subgroup_lt_mask:
2050 case nir_intrinsic_load_subgroup_invocation:
2051 case nir_intrinsic_load_tess_coord:
2052 case nir_intrinsic_load_tess_level_inner:
2053 case nir_intrinsic_load_tess_level_outer:
2054 case nir_intrinsic_load_vertex_id:
2055 case nir_intrinsic_load_workgroup_id:
2056 case nir_intrinsic_load_work_dim: {
2057 const DataType dType = getDType(insn);
2058 SVSemantic sv = convert(op);
2059 LValues &newDefs = convert(&insn->def);
2060
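      // System values are read as 32 bit; 64-bit destinations are widened by
      // merging with a zero high word.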
2061 for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
2062 Value *def;
2063 if (typeSizeof(dType) == 8)
2064 def = getSSA();
2065 else
2066 def = newDefs[i];
2067
2068 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
2069 loadImm(def, 0u);
2070 } else {
2071 Symbol *sym = mkSysVal(sv, i);
2072 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
2073 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
2074 rdsv->perPatch = 1;
2075 }
2076
2077 if (typeSizeof(dType) == 8)
2078 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
2079 }
2080 break;
2081 }
2082 // constants
2083 case nir_intrinsic_load_subgroup_size: {
2084 LValues &newDefs = convert(&insn->def);
2085 loadImm(newDefs[0], 32u);
2086 break;
2087 }
2088 case nir_intrinsic_vote_all:
2089 case nir_intrinsic_vote_any:
2090 case nir_intrinsic_vote_ieq: {
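      // Compare the source against zero into a predicate, run VOTE with the
      // matching subop and convert the predicate result back to a 32-bit bool.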
2091 LValues &newDefs = convert(&insn->def);
2092 Value *pred = getScratch(1, FILE_PREDICATE);
2093 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2094 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
2095 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
2096 break;
2097 }
2098 case nir_intrinsic_ballot: {
2099 LValues &newDefs = convert(&insn->def);
2100 Value *pred = getSSA(1, FILE_PREDICATE);
2101 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2102 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
2103 break;
2104 }
2105 case nir_intrinsic_read_first_invocation:
2106 case nir_intrinsic_read_invocation: {
2107 LValues &newDefs = convert(&insn->def);
2108 const DataType dType = getDType(insn);
2109 Value *tmp = getScratch();
2110
2111 if (op == nir_intrinsic_read_first_invocation) {
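         // Find the lowest active lane: VOTE.ANY over an all-ones predicate
         // yields the active-lane mask, and the bit-reverse + BFIND pair
         // computes the index of its lowest set bit, which then drives the
         // SHFL.IDX below.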
2112 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
2113 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
2114 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2115 } else
2116 tmp = getSrc(&insn->src[1], 0);
2117
2118 for (uint8_t i = 0; i < dest_components; ++i) {
2119 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
2120 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
2121 }
2122 break;
2123 }
2124 case nir_intrinsic_load_per_vertex_input: {
2125 const DataType dType = getDType(insn);
2126 LValues &newDefs = convert(&insn->def);
2127 Value *indirectVertex;
2128 Value *indirectOffset;
2129 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2130 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2131
2132 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
2133 mkImm(baseVertex), indirectVertex);
2134 for (uint8_t i = 0u; i < dest_components; ++i) {
2135 uint32_t address = getSlotAddress(insn, idx, i);
2136 loadFrom(getFile(op), 0, dType, newDefs[i], address, 0,
2137 indirectOffset, vtxBase, info_out->in[idx].patch);
2138 }
2139 break;
2140 }
2141 case nir_intrinsic_load_per_vertex_output: {
2142 const DataType dType = getDType(insn);
2143 LValues &newDefs = convert(&insn->def);
2144 Value *indirectVertex;
2145 Value *indirectOffset;
2146 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2147 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2148 Value *vtxBase = NULL;
2149
2150 if (indirectVertex)
2151 vtxBase = indirectVertex;
2152 else
2153 vtxBase = loadImm(NULL, baseVertex);
2154
2155 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
2156
2157 for (uint8_t i = 0u; i < dest_components; ++i) {
2158 uint32_t address = getSlotAddress(insn, idx, i);
2159 loadFrom(getFile(op), 0, dType, newDefs[i], address, 0,
2160 indirectOffset, vtxBase, info_out->in[idx].patch);
2161 }
2162 break;
2163 }
2164 case nir_intrinsic_emit_vertex: {
2165 uint32_t idx = nir_intrinsic_stream_id(insn);
2166 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
2167 break;
2168 }
2169 case nir_intrinsic_end_primitive: {
2170 uint32_t idx = nir_intrinsic_stream_id(insn);
2171 if (idx)
2172 break;
2173 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
2174 break;
2175 }
2176 case nir_intrinsic_load_ubo:
2177 case nir_intrinsic_ldc_nv: {
2178 const DataType dType = getDType(insn);
2179 LValues &newDefs = convert(&insn->def);
2180 Value *indirectIndex;
2181 Value *indirectOffset;
2182 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex);
2183 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2184 if (indirectOffset)
2185 indirectOffset = mkOp1v(OP_MOV, TYPE_U32, getSSA(4, FILE_ADDRESS), indirectOffset);
2186
2187 for (uint8_t i = 0u; i < dest_components; ++i) {
2188 loadFrom(getFile(op), index, dType, newDefs[i], offset, i,
2189 indirectOffset, indirectIndex);
2190 }
2191 break;
2192 }
2193 case nir_intrinsic_get_ssbo_size: {
2194 LValues &newDefs = convert(&insn->def);
2195 const DataType dType = getDType(insn);
2196 Value *indirectBuffer;
2197 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2198
2199 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2200 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2201 break;
2202 }
2203 case nir_intrinsic_store_ssbo: {
2204 Value *indirectBuffer;
2205 Value *indirectOffset;
2206 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2207 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2208
2209 CacheMode cache = convert(nir_intrinsic_access(insn));
2210
2211 storeVector(insn, buffer, indirectBuffer, offset, indirectOffset)->cache = cache;
2212
2213 info_out->io.globalAccess |= 0x2;
2214 break;
2215 }
2216 case nir_intrinsic_load_ssbo: {
2217 Value *indirectBuffer;
2218 Value *indirectOffset;
2219 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2220 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2221
2222 CacheMode cache = convert(nir_intrinsic_access(insn));
2223
2224 loadVector(insn, buffer, indirectBuffer, offset, indirectOffset)->cache = cache;
2225
2226 info_out->io.globalAccess |= 0x1;
2227 break;
2228 }
2229 case nir_intrinsic_shared_atomic:
2230 case nir_intrinsic_shared_atomic_swap: {
2231 const DataType dType = getDType(insn);
2232 LValues &newDefs = convert(&insn->def);
2233 Value *indirectOffset;
2234 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2235 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2236 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2237 if (op == nir_intrinsic_shared_atomic_swap)
2238 atom->setSrc(2, getSrc(&insn->src[2], 0));
2239 atom->setIndirect(0, 0, indirectOffset);
2240 atom->subOp = getAtomicSubOp(nir_intrinsic_atomic_op(insn));
2241 break;
2242 }
2243 case nir_intrinsic_ssbo_atomic:
2244 case nir_intrinsic_ssbo_atomic_swap: {
2245 const DataType dType = getDType(insn);
2246 LValues &newDefs = convert(&insn->def);
2247 Value *indirectBuffer;
2248 Value *indirectOffset;
2249 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2250 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2251
2252 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2253 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2254 getSrc(&insn->src[2], 0));
2255 if (op == nir_intrinsic_ssbo_atomic_swap)
2256 atom->setSrc(2, getSrc(&insn->src[3], 0));
2257 atom->setIndirect(0, 0, indirectOffset);
2258 atom->setIndirect(0, 1, indirectBuffer);
2259 atom->subOp = getAtomicSubOp(nir_intrinsic_atomic_op(insn));
2260
2261 info_out->io.globalAccess |= 0x2;
2262 break;
2263 }
2264 case nir_intrinsic_global_atomic:
2265 case nir_intrinsic_global_atomic_swap: {
2266 const DataType dType = getDType(insn);
2267 LValues &newDefs = convert(&insn->def);
2268 Value *address;
2269 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2270
2271 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2272 Instruction *atom =
2273 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2274 if (op == nir_intrinsic_global_atomic_swap)
2275 atom->setSrc(2, getSrc(&insn->src[2], 0));
2276 atom->setIndirect(0, 0, address);
2277 atom->subOp = getAtomicSubOp(nir_intrinsic_atomic_op(insn));
2278
2279 info_out->io.globalAccess |= 0x2;
2280 break;
2281 }
2282 case nir_intrinsic_bindless_image_atomic:
2283 case nir_intrinsic_bindless_image_atomic_swap:
2284 case nir_intrinsic_bindless_image_load:
2285 case nir_intrinsic_bindless_image_samples:
2286 case nir_intrinsic_bindless_image_size:
2287 case nir_intrinsic_bindless_image_store:
2288 case nir_intrinsic_image_atomic:
2289 case nir_intrinsic_image_atomic_swap:
2290 case nir_intrinsic_image_load:
2291 case nir_intrinsic_image_samples:
2292 case nir_intrinsic_image_size:
2293 case nir_intrinsic_image_store: {
2294 std::vector<Value*> srcs, defs;
2295 Value *indirect;
2296 DataType ty;
2297 int subOp = 0;
2298
2299 uint32_t mask = 0;
2300 TexInstruction::Target target =
2301 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2302 unsigned int argCount = getNIRArgCount(target);
2303 uint16_t location = 0;
2304
2305 if (opInfo.has_dest) {
2306 LValues &newDefs = convert(&insn->def);
2307 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2308 defs.push_back(newDefs[i]);
2309 mask |= 1 << i;
2310 }
2311 }
2312
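      // lod_src marks the NIR source slot holding the LOD for image
      // load/store, so the generic source-gathering loops below can skip it.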
2313 int lod_src = -1;
2314 bool bindless = false;
2315 switch (op) {
2316 case nir_intrinsic_bindless_image_atomic:
2317 case nir_intrinsic_bindless_image_atomic_swap:
2318 ty = getDType(insn);
2319 bindless = true;
2320 info_out->io.globalAccess |= 0x2;
2321 mask = 0x1;
2322 subOp = getAtomicSubOp(nir_intrinsic_atomic_op(insn));
2323 break;
2324 case nir_intrinsic_image_atomic:
2325 case nir_intrinsic_image_atomic_swap:
2326 ty = getDType(insn);
2327 bindless = false;
2328 info_out->io.globalAccess |= 0x2;
2329 mask = 0x1;
2330 subOp = getAtomicSubOp(nir_intrinsic_atomic_op(insn));
2331 break;
2332 case nir_intrinsic_bindless_image_load:
2333 case nir_intrinsic_image_load:
2334 ty = TYPE_U32;
2335 bindless = op == nir_intrinsic_bindless_image_load;
2336 info_out->io.globalAccess |= 0x1;
2337 lod_src = 4;
2338 break;
2339 case nir_intrinsic_bindless_image_store:
2340 case nir_intrinsic_image_store:
2341 ty = TYPE_U32;
2342 bindless = op == nir_intrinsic_bindless_image_store;
2343 info_out->io.globalAccess |= 0x2;
2344 lod_src = 5;
2345 mask = 0xf;
2346 break;
2347 case nir_intrinsic_bindless_image_samples:
2348 mask = 0x8;
2349 FALLTHROUGH;
2350 case nir_intrinsic_image_samples:
2351 argCount = 0; /* No coordinates */
2352 ty = TYPE_U32;
2353 bindless = op == nir_intrinsic_bindless_image_samples;
2354 mask = 0x8;
2355 break;
2356 case nir_intrinsic_bindless_image_size:
2357 case nir_intrinsic_image_size:
2358 assert(nir_src_as_uint(insn->src[1]) == 0);
2359 argCount = 0; /* No coordinates */
2360 ty = TYPE_U32;
2361 bindless = op == nir_intrinsic_bindless_image_size;
2362 break;
2363 default:
2364 unreachable("unhandled image opcode");
2365 break;
2366 }
2367
2368 if (bindless)
2369 indirect = getSrc(&insn->src[0], 0);
2370 else
2371 location = getIndirect(&insn->src[0], 0, indirect);
2372
2373 /* Pre-GF100, SSBOs and images are in the same HW file, managed by
2374        * prop.cp.gmem. Images are located after SSBOs.
2375 */
2376 if (info->target < NVISA_GF100_CHIPSET)
2377 location += nir->info.num_ssbos;
2378
2379 // coords
2380 if (opInfo.num_srcs >= 2)
2381 for (unsigned int i = 0u; i < argCount; ++i)
2382 srcs.push_back(getSrc(&insn->src[1], i));
2383
2384 // the sampler is just another src added after coords
2385 if (opInfo.num_srcs >= 3 && target.isMS())
2386 srcs.push_back(getSrc(&insn->src[2], 0));
2387
2388 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2389 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2390 for (uint8_t i = 0u; i < components; ++i)
2391 srcs.push_back(getSrc(&insn->src[3], i));
2392 }
2393
2394 if (opInfo.num_srcs >= 5 && lod_src != 5)
2395          // 1 for atomic swap
2396 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2397 srcs.push_back(getSrc(&insn->src[4], i));
2398
2399 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2400 texi->tex.bindless = bindless;
2401 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2402 texi->tex.mask = mask;
2403 texi->cache = convert(nir_intrinsic_access(insn));
2404 texi->setType(ty);
2405 texi->subOp = subOp;
2406
2407 if (indirect)
2408 texi->setIndirectR(indirect);
2409
2410 break;
2411 }
2412 case nir_intrinsic_store_scratch:
2413 case nir_intrinsic_store_shared: {
2414 Value *indirectOffset;
2415 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2416 if (indirectOffset)
2417 indirectOffset = mkOp1v(OP_MOV, TYPE_U32, getSSA(4, FILE_ADDRESS), indirectOffset);
2418
2419 storeVector(insn, 0, nullptr, offset, indirectOffset);
2420 break;
2421 }
2422 case nir_intrinsic_load_kernel_input: {
2423 const DataType dType = getDType(insn);
2424 LValues &newDefs = convert(&insn->def);
2425 Value *indirectOffset;
2426 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2427 if (indirectOffset)
2428 indirectOffset = mkOp1v(OP_MOV, TYPE_U32, getSSA(4, FILE_ADDRESS), indirectOffset);
2429
2430 for (uint8_t i = 0u; i < dest_components; ++i)
2431 loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);
2432
2433 break;
2434 }
2435 case nir_intrinsic_load_scratch:
2436 case nir_intrinsic_load_shared: {
2437 Value *indirectOffset;
2438 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2439 if (indirectOffset)
2440 indirectOffset = mkOp1v(OP_MOV, TYPE_U32, getSSA(4, FILE_ADDRESS), indirectOffset);
2441
2442 loadVector(insn, 0, nullptr, offset, indirectOffset);
2443
2444 break;
2445 }
2446 case nir_intrinsic_barrier: {
2447 mesa_scope exec_scope = nir_intrinsic_execution_scope(insn);
2448 mesa_scope mem_scope = nir_intrinsic_memory_scope(insn);
2449 nir_variable_mode modes = nir_intrinsic_memory_modes(insn);
2450 nir_variable_mode valid_modes =
2451 nir_var_mem_global | nir_var_image | nir_var_mem_ssbo | nir_var_mem_shared;
2452
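      // Memory barrier: scopes at or above the queue family need a GL-scoped
      // membar, narrower scopes only need CTA scope.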
2453 if (mem_scope != SCOPE_NONE && (modes & valid_modes)) {
2454
2455 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2456 bar->fixed = 1;
2457
2458 if (mem_scope >= SCOPE_QUEUE_FAMILY)
2459 bar->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
2460 else
2461 bar->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
2462 }
2463
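      // Execution barrier: emitted as BAR.SYNC, except for workgroup-scoped
      // barriers in tessellation control shaders, which are skipped here.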
2464 if (exec_scope != SCOPE_NONE &&
2465 !(exec_scope == SCOPE_WORKGROUP && nir->info.stage == MESA_SHADER_TESS_CTRL)) {
2466 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2467 bar->fixed = 1;
2468 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2469 info_out->numBarriers = 1;
2470 }
2471
2472 break;
2473 }
2474 case nir_intrinsic_shader_clock: {
2475 const DataType dType = getDType(insn);
2476 LValues &newDefs = convert(&insn->def);
2477
2478 loadImm(newDefs[0], 0u);
2479 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2480 break;
2481 }
2482 case nir_intrinsic_load_global:
2483 case nir_intrinsic_load_global_constant: {
2484 Value *indirectOffset;
2485 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2486
2487 loadVector(insn, 0, nullptr, offset, indirectOffset);
2488
2489 info_out->io.globalAccess |= 0x1;
2490 break;
2491 }
2492 case nir_intrinsic_store_global: {
2493 Value *indirectOffset;
2494 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2495
2496 storeVector(insn, 0, nullptr, offset, indirectOffset);
2497
2498 info_out->io.globalAccess |= 0x2;
2499 break;
2500 }
2501 case nir_intrinsic_ddx:
2502 case nir_intrinsic_ddx_coarse:
2503 case nir_intrinsic_ddx_fine:
2504 case nir_intrinsic_ddy:
2505 case nir_intrinsic_ddy_coarse:
2506 case nir_intrinsic_ddy_fine: {
2507 assert(insn->def.num_components == 1);
2508 const DataType dType = getDType(insn);
2509 LValues &newDefs = convert(&insn->def);
2510 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0], 0));
2511 break;
2512 }
2513 default:
2514 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2515 return false;
2516 }
2517
2518 return true;
2519 }
2520
2521 bool
2522 Converter::visit(nir_jump_instr *insn)
2523 {
2524 switch (insn->type) {
2525 case nir_jump_break:
2526 case nir_jump_continue: {
2527 bool isBreak = insn->type == nir_jump_break;
2528 nir_block *block = insn->instr.block;
2529 BasicBlock *target = convert(block->successors[0]);
2530 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2531 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2532 break;
2533 }
2534 default:
2535 ERROR("unknown nir_jump_type %u\n", insn->type);
2536 return false;
2537 }
2538
2539 return true;
2540 }
2541
2542 Value*
2543 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2544 {
2545 Value *val;
2546
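   // Materialize the immediate at the recorded insertion point (if any)
   // instead of the current cursor, then restore the position afterwards.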
2547 if (immInsertPos)
2548 setPosition(immInsertPos, true);
2549 else
2550 setPosition(bb, false);
2551
2552 switch (insn->def.bit_size) {
2553 case 64:
2554 val = loadImm(getSSA(8), insn->value[idx].u64);
2555 break;
2556 case 32:
2557 val = loadImm(getSSA(4), insn->value[idx].u32);
2558 break;
2559 case 16:
2560 val = loadImm(getSSA(4), insn->value[idx].u16);
2561 break;
2562 case 8:
2563 val = loadImm(getSSA(4), insn->value[idx].u8);
2564 break;
2565 default:
2566 unreachable("unhandled bit size!\n");
2567 }
2568 setPosition(bb, true);
2569 return val;
2570 }
2571
2572 bool
2573 Converter::visit(nir_load_const_instr *insn)
2574 {
2575 assert(insn->def.bit_size <= 64);
2576 immediates[insn->def.index] = insn;
2577 return true;
2578 }
2579
2580 #define DEFAULT_CHECKS \
2581 if (insn->def.num_components > 1) { \
2582 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2583 return false; \
2584 }
2585 bool
2586 Converter::visit(nir_alu_instr *insn)
2587 {
2588 const nir_op op = insn->op;
2589 const nir_op_info &info = nir_op_infos[op];
2590 DataType dType = getDType(insn);
2591 const std::vector<DataType> sTypes = getSTypes(insn);
2592
2593 Instruction *oldPos = this->bb->getExit();
2594
2595 switch (op) {
2596 case nir_op_fabs:
2597 case nir_op_iabs:
2598 case nir_op_fadd:
2599 case nir_op_iadd:
2600 case nir_op_iand:
2601 case nir_op_fceil:
2602 case nir_op_fcos:
2603 case nir_op_fdiv:
2604 case nir_op_idiv:
2605 case nir_op_udiv:
2606 case nir_op_fexp2:
2607 case nir_op_ffloor:
2608 case nir_op_ffma:
2609 case nir_op_ffmaz:
2610 case nir_op_flog2:
2611 case nir_op_fmax:
2612 case nir_op_imax:
2613 case nir_op_umax:
2614 case nir_op_fmin:
2615 case nir_op_imin:
2616 case nir_op_umin:
2617 case nir_op_fmod:
2618 case nir_op_imod:
2619 case nir_op_umod:
2620 case nir_op_fmul:
2621 case nir_op_fmulz:
2622 case nir_op_amul:
2623 case nir_op_imul:
2624 case nir_op_imul_high:
2625 case nir_op_umul_high:
2626 case nir_op_fneg:
2627 case nir_op_ineg:
2628 case nir_op_inot:
2629 case nir_op_ior:
2630 case nir_op_pack_64_2x32_split:
2631 case nir_op_frcp:
2632 case nir_op_frem:
2633 case nir_op_irem:
2634 case nir_op_frsq:
2635 case nir_op_fsat:
2636 case nir_op_ishr:
2637 case nir_op_ushr:
2638 case nir_op_fsin:
2639 case nir_op_fsqrt:
2640 case nir_op_ftrunc:
2641 case nir_op_ishl:
2642 case nir_op_ixor: {
2643 DEFAULT_CHECKS;
2644 LValues &newDefs = convert(&insn->def);
2645 operation preOp = preOperationNeeded(op);
2646 if (preOp != OP_NOP) {
2647 assert(info.num_inputs < 2);
2648 Value *tmp = getSSA(typeSizeof(dType));
2649 Instruction *i0 = mkOp(preOp, dType, tmp);
2650 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2651 if (info.num_inputs) {
2652 i0->setSrc(0, getSrc(&insn->src[0]));
2653 i1->setSrc(0, tmp);
2654 }
2655 i1->subOp = getSubOp(op);
2656 } else {
2657 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2658 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2659 i->setSrc(s, getSrc(&insn->src[s]));
2660
2661 switch (op) {
2662 case nir_op_fmul:
2663 case nir_op_ffma:
2664 i->dnz = this->info->io.mul_zero_wins;
2665 break;
2666 case nir_op_fmulz:
2667 case nir_op_ffmaz:
2668 i->dnz = true;
2669 break;
2670 default:
2671 break;
2672 }
2673 }
2674 i->subOp = getSubOp(op);
2675 }
2676 break;
2677 }
2678 case nir_op_ifind_msb:
2679 case nir_op_ufind_msb: {
2680 DEFAULT_CHECKS;
2681 LValues &newDefs = convert(&insn->def);
2682 dType = sTypes[0];
2683 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2684 break;
2685 }
2686 case nir_op_fround_even: {
2687 DEFAULT_CHECKS;
2688 LValues &newDefs = convert(&insn->def);
2689 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2690 break;
2691 }
2692 // convert instructions
2693 case nir_op_f2i8:
2694 case nir_op_f2u8:
2695 case nir_op_i2i8:
2696 case nir_op_u2u8:
2697 case nir_op_f2i16:
2698 case nir_op_f2u16:
2699 case nir_op_i2i16:
2700 case nir_op_u2u16:
2701 case nir_op_f2f32:
2702 case nir_op_f2i32:
2703 case nir_op_f2u32:
2704 case nir_op_i2f32:
2705 case nir_op_i2i32:
2706 case nir_op_u2f32:
2707 case nir_op_u2u32:
2708 case nir_op_f2f64:
2709 case nir_op_f2i64:
2710 case nir_op_f2u64:
2711 case nir_op_i2f64:
2712 case nir_op_i2i64:
2713 case nir_op_u2f64:
2714 case nir_op_u2u64: {
2715 DEFAULT_CHECKS;
2716 LValues &newDefs = convert(&insn->def);
2717 DataType stype = sTypes[0];
2718 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2719 if (::isFloatType(stype) && isIntType(dType))
2720 i->rnd = ROUND_Z;
2721 i->sType = stype;
2722 break;
2723 }
2724 // compare instructions
2725 case nir_op_ieq8:
2726 case nir_op_ige8:
2727 case nir_op_uge8:
2728 case nir_op_ilt8:
2729 case nir_op_ult8:
2730 case nir_op_ine8:
2731 case nir_op_ieq16:
2732 case nir_op_ige16:
2733 case nir_op_uge16:
2734 case nir_op_ilt16:
2735 case nir_op_ult16:
2736 case nir_op_ine16:
2737 case nir_op_feq32:
2738 case nir_op_ieq32:
2739 case nir_op_fge32:
2740 case nir_op_ige32:
2741 case nir_op_uge32:
2742 case nir_op_flt32:
2743 case nir_op_ilt32:
2744 case nir_op_ult32:
2745 case nir_op_fneu32:
2746 case nir_op_ine32: {
2747 DEFAULT_CHECKS;
2748 LValues &newDefs = convert(&insn->def);
2749 Instruction *i = mkCmp(getOperation(op),
2750 getCondCode(op),
2751 dType,
2752 newDefs[0],
2753 dType,
2754 getSrc(&insn->src[0]),
2755 getSrc(&insn->src[1]));
2756 if (info.num_inputs == 3)
2757 i->setSrc(2, getSrc(&insn->src[2]));
2758 i->sType = sTypes[0];
2759 break;
2760 }
2761 case nir_op_mov: {
2762 LValues &newDefs = convert(&insn->def);
2763 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2764 mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
2765 }
2766 break;
2767 }
2768 case nir_op_vec2:
2769 case nir_op_vec3:
2770 case nir_op_vec4:
2771 case nir_op_vec8:
2772 case nir_op_vec16: {
2773 LValues &newDefs = convert(&insn->def);
2774 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2775 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2776 }
2777 break;
2778 }
2779 // (un)pack
2780 case nir_op_pack_64_2x32: {
2781 LValues &newDefs = convert(&insn->def);
2782 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2783 merge->setSrc(0, getSrc(&insn->src[0], 0));
2784 merge->setSrc(1, getSrc(&insn->src[0], 1));
2785 break;
2786 }
2787 case nir_op_pack_half_2x16_split: {
2788 LValues &newDefs = convert(&insn->def);
2789 Value *tmpH = getSSA();
2790 Value *tmpL = getSSA();
2791
2792 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2793 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2794 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2795 break;
2796 }
2797 case nir_op_unpack_half_2x16_split_x:
2798 case nir_op_unpack_half_2x16_split_y: {
2799 LValues &newDefs = convert(&insn->def);
2800 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2801 if (op == nir_op_unpack_half_2x16_split_y)
2802 cvt->subOp = 1;
2803 break;
2804 }
2805 case nir_op_unpack_64_2x32: {
2806 LValues &newDefs = convert(&insn->def);
2807 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2808 break;
2809 }
2810 case nir_op_unpack_64_2x32_split_x: {
2811 LValues &newDefs = convert(&insn->def);
2812 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2813 break;
2814 }
2815 case nir_op_unpack_64_2x32_split_y: {
2816 LValues &newDefs = convert(&insn->def);
2817 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2818 break;
2819 }
2820 // special instructions
2821 case nir_op_fsign:
2822 case nir_op_isign: {
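      // sign(x) is built as (x > 0) - (x < 0): the SETs yield 0 / 1.0f for
      // floats and 0 / -1 for integers, which is why the operand order of the
      // final subtract differs between the float and integer paths.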
2823 DEFAULT_CHECKS;
2824 DataType iType;
2825 if (::isFloatType(dType))
2826 iType = TYPE_F32;
2827 else
2828 iType = TYPE_S32;
2829
2830 LValues &newDefs = convert(&insn->def);
2831 LValue *val0 = getScratch();
2832 LValue *val1 = getScratch();
2833 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2834 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2835
2836 if (dType == TYPE_F64) {
2837 mkOp2(OP_SUB, iType, val0, val0, val1);
2838 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2839 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2840 mkOp2(OP_SUB, iType, val0, val1, val0);
2841 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2842 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2843 } else if (::isFloatType(dType))
2844 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2845 else
2846 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2847 break;
2848 }
2849 case nir_op_fcsel:
2850 case nir_op_b32csel: {
2851 DEFAULT_CHECKS;
2852 LValues &newDefs = convert(&insn->def);
2853 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2854 break;
2855 }
2856 case nir_op_ibitfield_extract:
2857 case nir_op_ubitfield_extract: {
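      // EXTBF takes a single descriptor with the bit offset in [7:0] and the
      // field width in [15:8]; build it from src[1] and src[2] with INSBF.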
2858 DEFAULT_CHECKS;
2859 Value *tmp = getSSA();
2860 LValues &newDefs = convert(&insn->def);
2861 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2862 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2863 break;
2864 }
2865 case nir_op_bfm: {
2866 DEFAULT_CHECKS;
2867 LValues &newDefs = convert(&insn->def);
2868 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2869 break;
2870 }
2871 case nir_op_bitfield_insert: {
2872 DEFAULT_CHECKS;
2873 LValues &newDefs = convert(&insn->def);
2874 LValue *temp = getSSA();
2875 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2876 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2877 break;
2878 }
2879 case nir_op_bit_count: {
2880 DEFAULT_CHECKS;
2881 LValues &newDefs = convert(&insn->def);
2882 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2883 break;
2884 }
2885 case nir_op_bitfield_reverse: {
2886 DEFAULT_CHECKS;
2887 LValues &newDefs = convert(&insn->def);
2888 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2889 break;
2890 }
2891 case nir_op_find_lsb: {
2892 DEFAULT_CHECKS;
2893 LValues &newDefs = convert(&insn->def);
2894 Value *tmp = getSSA();
2895 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2896 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2897 break;
2898 }
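   // extract_[iu]{8,16} use the byte-permute instruction: src[1] selects the
   // byte/word, and the computed PRMT control either zero-extends (u variants)
   // or sign-extends (i variants, via the sign-replicating selector nibbles).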
2899 case nir_op_extract_u8: {
2900 DEFAULT_CHECKS;
2901 LValues &newDefs = convert(&insn->def);
2902 Value *prmt = getSSA();
2903 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2904 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2905 break;
2906 }
2907 case nir_op_extract_i8: {
2908 DEFAULT_CHECKS;
2909 LValues &newDefs = convert(&insn->def);
2910 Value *prmt = getSSA();
2911 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2912 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2913 break;
2914 }
2915 case nir_op_extract_u16: {
2916 DEFAULT_CHECKS;
2917 LValues &newDefs = convert(&insn->def);
2918 Value *prmt = getSSA();
2919 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2920 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2921 break;
2922 }
2923 case nir_op_extract_i16: {
2924 DEFAULT_CHECKS;
2925 LValues &newDefs = convert(&insn->def);
2926 Value *prmt = getSSA();
2927 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2928 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2929 break;
2930 }
2931 case nir_op_fquantize2f16: {
2932 DEFAULT_CHECKS;
2933 LValues &newDefs = convert(&insn->def);
2934 Value *tmp = getSSA();
2935 mkCvt(OP_CVT, TYPE_F16, tmp, TYPE_F32, getSrc(&insn->src[0]))->ftz = 1;
2936 mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, tmp);
2937 break;
2938 }
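   // Rotates are funnel shifts (SHF) with the same register supplied as both
   // the upper and the lower input word.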
2939 case nir_op_urol: {
2940 DEFAULT_CHECKS;
2941 LValues &newDefs = convert(&insn->def);
2942 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2943 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2944 ->subOp = NV50_IR_SUBOP_SHF_L |
2945 NV50_IR_SUBOP_SHF_W |
2946 NV50_IR_SUBOP_SHF_HI;
2947 break;
2948 }
2949 case nir_op_uror: {
2950 DEFAULT_CHECKS;
2951 LValues &newDefs = convert(&insn->def);
2952 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2953 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2954 ->subOp = NV50_IR_SUBOP_SHF_R |
2955 NV50_IR_SUBOP_SHF_W |
2956 NV50_IR_SUBOP_SHF_LO;
2957 break;
2958 }
2959 // boolean conversions
2960 case nir_op_b2f32: {
2961 DEFAULT_CHECKS;
2962 LValues &newDefs = convert(&insn->def);
2963 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2964 break;
2965 }
2966 case nir_op_b2f64: {
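      // 0x3ff00000 is the high word of the double 1.0, so ANDing it with the
      // 0/~0 boolean and merging with a zero low word yields 0.0 or 1.0.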
2967 DEFAULT_CHECKS;
2968 LValues &newDefs = convert(&insn->def);
2969 Value *tmp = getSSA(4);
2970 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2971 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2972 break;
2973 }
2974 case nir_op_b2i8:
2975 case nir_op_b2i16:
2976 case nir_op_b2i32: {
2977 DEFAULT_CHECKS;
2978 LValues &newDefs = convert(&insn->def);
2979 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2980 break;
2981 }
2982 case nir_op_b2i64: {
2983 DEFAULT_CHECKS;
2984 LValues &newDefs = convert(&insn->def);
2985 LValue *def = getScratch();
2986 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2987 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2988 break;
2989 }
2990 default:
2991 ERROR("unknown nir_op %s\n", info.name);
2992 assert(false);
2993 return false;
2994 }
2995
2996 if (!oldPos) {
2997 oldPos = this->bb->getEntry();
2998 oldPos->precise = insn->exact;
2999 }
3000
3001 if (unlikely(!oldPos))
3002 return true;
3003
3004 while (oldPos->next) {
3005 oldPos = oldPos->next;
3006 oldPos->precise = insn->exact;
3007 }
3008
3009 return true;
3010 }
3011 #undef DEFAULT_CHECKS
3012
3013 bool
3014 Converter::visit(nir_undef_instr *insn)
3015 {
3016 LValues &newDefs = convert(&insn->def);
3017 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
3018 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
3019 }
3020 return true;
3021 }
3022
3023 #define CASE_SAMPLER(ty) \
3024 case GLSL_SAMPLER_DIM_ ## ty : \
3025 if (isArray && !isShadow) \
3026 return TEX_TARGET_ ## ty ## _ARRAY; \
3027 else if (!isArray && isShadow) \
3028 return TEX_TARGET_## ty ## _SHADOW; \
3029 else if (isArray && isShadow) \
3030 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
3031 else \
3032 return TEX_TARGET_ ## ty
3033
3034 TexTarget
3035 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
3036 {
3037 switch (dim) {
3038 CASE_SAMPLER(1D);
3039 case GLSL_SAMPLER_DIM_SUBPASS:
3040 CASE_SAMPLER(2D);
3041 CASE_SAMPLER(CUBE);
3042 case GLSL_SAMPLER_DIM_3D:
3043 return TEX_TARGET_3D;
3044 case GLSL_SAMPLER_DIM_MS:
3045 case GLSL_SAMPLER_DIM_SUBPASS_MS:
3046 if (isArray)
3047 return TEX_TARGET_2D_MS_ARRAY;
3048 return TEX_TARGET_2D_MS;
3049 case GLSL_SAMPLER_DIM_RECT:
3050 if (isShadow)
3051 return TEX_TARGET_RECT_SHADOW;
3052 return TEX_TARGET_RECT;
3053 case GLSL_SAMPLER_DIM_BUF:
3054 return TEX_TARGET_BUFFER;
3055 case GLSL_SAMPLER_DIM_EXTERNAL:
3056 return TEX_TARGET_2D;
3057 default:
3058 ERROR("unknown glsl_sampler_dim %u\n", dim);
3059 assert(false);
3060 return TEX_TARGET_COUNT;
3061 }
3062 }
3063 #undef CASE_SAMPLER
3064
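/* NIR provides fewer coordinate components than codegen's argument count for
 * cube arrays and multisampled targets, so adjust the count accordingly.
 */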
3065 unsigned int
3066 Converter::getNIRArgCount(TexInstruction::Target& target)
3067 {
3068 unsigned int result = target.getArgCount();
3069 if (target.isCube() && target.isArray())
3070 result--;
3071 if (target.isMS())
3072 result--;
3073 return result;
3074 }
3075
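/* Map GL access qualifiers to cache modes: volatile accesses bypass the
 * caches (CV), coherent ones are cached at the global level only (CG), and
 * everything else uses the default cache-all mode (CA).
 */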
3076 CacheMode
3077 Converter::convert(enum gl_access_qualifier access)
3078 {
3079 if (access & ACCESS_VOLATILE)
3080 return CACHE_CV;
3081 if (access & ACCESS_COHERENT)
3082 return CACHE_CG;
3083 return CACHE_CA;
3084 }
3085
3086 bool
3087 Converter::visit(nir_tex_instr *insn)
3088 {
3089 switch (insn->op) {
3090 case nir_texop_lod:
3091 case nir_texop_query_levels:
3092 case nir_texop_tex:
3093 case nir_texop_texture_samples:
3094 case nir_texop_tg4:
3095 case nir_texop_txb:
3096 case nir_texop_txd:
3097 case nir_texop_txf:
3098 case nir_texop_txf_ms:
3099 case nir_texop_txl:
3100 case nir_texop_txs: {
3101 LValues &newDefs = convert(&insn->def);
3102 std::vector<Value*> srcs;
3103 std::vector<Value*> defs;
3104 std::vector<nir_src*> offsets;
3105 uint8_t mask = 0;
3106 bool lz = false;
3107 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
3108 operation op = getOperation(insn->op);
3109
3110 int r, s;
3111 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
3112 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
3113 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
3114 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
3115 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
3116 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
3117 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
3118 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
3119 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
3120 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
3121 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
3122 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
3123
3124 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
3125 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
3126
3127 srcs.resize(insn->coord_components);
3128 for (uint8_t i = 0u; i < insn->coord_components; ++i)
3129 srcs[i] = getSrc(&insn->src[coordsIdx].src, i);
3130
3131       // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
3132 if (insn->coord_components) {
3133 uint32_t argCount = target.getArgCount();
3134
3135 if (target.isMS())
3136 argCount -= 1;
3137
3138 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
3139 srcs.push_back(getSSA());
3140 }
3141
3142 if (biasIdx != -1)
3143 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
3144 // TXQ requires a lod argument for all queries we care about here.
3145 // For other ops on MS textures we skip it.
3146 if (lodIdx != -1 && !target.isMS())
3147 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3148 else if (op == OP_TXQ)
3149 srcs.push_back(zero); // TXQ always needs an LOD
3150 else if (op == OP_TXF)
3151 lz = true;
3152 if (msIdx != -1)
3153 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3154 if (offsetIdx != -1)
3155 offsets.push_back(&insn->src[offsetIdx].src);
3156 if (compIdx != -1)
3157 srcs.push_back(getSrc(&insn->src[compIdx].src, 0));
3158 if (texOffIdx != -1) {
3159 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3160 texOffIdx = srcs.size() - 1;
3161 }
3162 if (sampOffIdx != -1) {
3163 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3164 sampOffIdx = srcs.size() - 1;
3165 }
3166 if (bindless) {
3167 // currently we use the lower bits
3168 Value *split[2];
3169 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3170
3171 mkSplit(split, 4, handle);
3172
3173 srcs.push_back(split[0]);
3174 texOffIdx = srcs.size() - 1;
3175 }
3176
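      // For bindless the fixed texture/sampler slots are dummies; the real
      // handle is supplied through the indirect source pushed above.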
3177 r = bindless ? 0xff : insn->texture_index;
3178 s = bindless ? 0x1f : insn->sampler_index;
3179 if (op == OP_TXF || op == OP_TXQ)
3180 s = 0;
3181
3182 defs.resize(newDefs.size());
3183 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3184 defs[d] = newDefs[d];
3185 mask |= 1 << d;
3186 }
3187 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3188 lz = true;
3189
3190 // TODO figure out which instructions still need this.
3191 if (srcs.empty())
3192 srcs.push_back(loadImm(NULL, 0));
3193
3194 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3195 texi->tex.levelZero = lz;
3196 texi->tex.mask = mask;
3197 texi->tex.bindless = bindless;
3198
3199 if (texOffIdx != -1)
3200 texi->tex.rIndirectSrc = texOffIdx;
3201 if (sampOffIdx != -1)
3202 texi->tex.sIndirectSrc = sampOffIdx;
3203
3204 switch (insn->op) {
3205 case nir_texop_tg4:
3206 if (!target.isShadow())
3207 texi->tex.gatherComp = insn->component;
3208 break;
3209 case nir_texop_txs:
3210 texi->tex.query = TXQ_DIMS;
3211 break;
3212 case nir_texop_texture_samples:
3213 texi->tex.mask = 0x4;
3214 texi->tex.query = TXQ_TYPE;
3215 break;
3216 case nir_texop_query_levels:
3217 texi->tex.mask = 0x8;
3218 texi->tex.query = TXQ_DIMS;
3219 break;
3220       // TODO: TXQ_SAMPLE_POSITION needs the sample id instead of the LOD emitted further up.
3221 default:
3222 break;
3223 }
3224
3225 texi->tex.useOffsets = offsets.size();
3226 if (texi->tex.useOffsets) {
3227 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3228 for (uint32_t c = 0u; c < 3; ++c) {
3229 uint8_t s2 = std::min(c, target.getDim() - 1);
3230 texi->offset[s][c].set(getSrc(offsets[s], s2));
3231 texi->offset[s][c].setInsn(texi);
3232 }
3233 }
3234 }
3235
3236 if (op == OP_TXG && offsetIdx == -1) {
3237 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3238 texi->tex.useOffsets = 4;
3239 setPosition(texi, false);
3240 for (uint8_t i = 0; i < 4; ++i) {
3241 for (uint8_t j = 0; j < 2; ++j) {
3242 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3243 texi->offset[i][j].setInsn(texi);
3244 }
3245 }
3246 setPosition(texi, true);
3247 }
3248 }
3249
3250 if (ddxIdx != -1 && ddyIdx != -1) {
3251 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3252 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3253 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3254 }
3255 }
3256
3257 break;
3258 }
3259 default:
3260 ERROR("unknown nir_texop %u\n", insn->op);
3261 return false;
3262 }
3263 return true;
3264 }
3265
3266 /* nouveau's RA doesn't track the liveness of exported registers in the fragment
3267 * shader, so we need all the store_outputs to appear at the end of the shader
3268 * with no other instructions that might generate a temp value in between them.
3269 */
3270 static void
3271 nv_nir_move_stores_to_end(nir_shader *s)
3272 {
3273 nir_function_impl *impl = nir_shader_get_entrypoint(s);
3274 nir_block *block = nir_impl_last_block(impl);
3275 nir_instr *first_store = NULL;
3276
3277 nir_foreach_instr_safe(instr, block) {
3278 if (instr == first_store)
3279 break;
3280 if (instr->type != nir_instr_type_intrinsic)
3281 continue;
3282 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
3283 if (intrin->intrinsic == nir_intrinsic_store_output) {
3284 nir_instr_remove(instr);
3285 nir_instr_insert(nir_after_block(block), instr);
3286
3287 if (!first_store)
3288 first_store = instr;
3289 }
3290 }
3291 nir_metadata_preserve(impl,
3292 nir_metadata_control_flow);
3293 }
3294
3295 unsigned
3296 Converter::lowerBitSizeCB(const nir_instr *instr, void *data)
3297 {
3298 Converter *instance = static_cast<Converter *>(data);
3299 nir_alu_instr *alu;
3300
3301 if (instr->type != nir_instr_type_alu)
3302 return 0;
3303
3304 alu = nir_instr_as_alu(instr);
3305
3306 switch (alu->op) {
3307 /* TODO: Check for operation OP_SET instead of all listed nir opcodes
3308 * individually.
3309 *
3310 * Currently, we can't call getOperation(nir_op), since not all nir opcodes
3311 * are handled within getOperation() and we'd run into an assert().
3312 *
3313 * Adding all nir opcodes to getOperation() isn't trivial, since the
3314 * enum operation of some of the nir opcodes isn't distinct (e.g. depends
3315 * on the data type).
3316 */
3317 case nir_op_ieq8:
3318 case nir_op_ige8:
3319 case nir_op_uge8:
3320 case nir_op_ilt8:
3321 case nir_op_ult8:
3322 case nir_op_ine8:
3323 case nir_op_ieq16:
3324 case nir_op_ige16:
3325 case nir_op_uge16:
3326 case nir_op_ilt16:
3327 case nir_op_ult16:
3328 case nir_op_ine16:
3329 case nir_op_feq32:
3330 case nir_op_ieq32:
3331 case nir_op_fge32:
3332 case nir_op_ige32:
3333 case nir_op_uge32:
3334 case nir_op_flt32:
3335 case nir_op_ilt32:
3336 case nir_op_ult32:
3337 case nir_op_fneu32:
3338 case nir_op_ine32: {
3339 DataType stype = instance->getSTypes(alu)[0];
3340
3341 if (isSignedIntType(stype) && typeSizeof(stype) < 4)
3342 return 32;
3343
3344 return 0;
3345 }
3346 case nir_op_i2f64:
3347 case nir_op_u2f64: {
3348 DataType stype = instance->getSTypes(alu)[0];
3349
3350 if (isIntType(stype) && (typeSizeof(stype) <= 2))
3351 return 32;
3352
3353 return 0;
3354 }
3355 default:
3356 return 0;
3357 }
3358 }
3359
3360 void
3361 Converter::runOptLoop()
3362 {
3363 bool progress;
3364 do {
3365 progress = false;
3366 NIR_PASS(progress, nir, nir_copy_prop);
3367 NIR_PASS(progress, nir, nir_opt_remove_phis);
3368 NIR_PASS(progress, nir, nir_opt_loop);
3369 NIR_PASS(progress, nir, nir_opt_cse);
3370 NIR_PASS(progress, nir, nir_opt_algebraic);
3371 NIR_PASS(progress, nir, nir_opt_constant_folding);
3372 NIR_PASS(progress, nir, nir_copy_prop);
3373 NIR_PASS(progress, nir, nir_opt_dce);
3374 NIR_PASS(progress, nir, nir_opt_dead_cf);
3375 NIR_PASS(progress, nir, nir_lower_64bit_phis);
3376 } while (progress);
3377 }
3378
3379 bool
3380 Converter::run()
3381 {
3382 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3383 nir_print_shader(nir, stderr);
3384
3385 struct nir_lower_subgroups_options subgroup_options = {};
3386 subgroup_options.subgroup_size = 32;
3387 subgroup_options.ballot_bit_size = 32;
3388 subgroup_options.ballot_components = 1;
3389 subgroup_options.lower_elect = true;
3390 subgroup_options.lower_inverse_ballot = true;
3391
3392 unsigned lower_flrp = (nir->options->lower_flrp16 ? 16 : 0) |
3393 (nir->options->lower_flrp32 ? 32 : 0) |
3394 (nir->options->lower_flrp64 ? 64 : 0);
3395 assert(lower_flrp);
3396
3397 info_out->io.genUserClip = info->io.genUserClip;
3398 if (info->io.genUserClip > 0) {
3399 bool lowered = false;
3400
3401 if (nir->info.stage == MESA_SHADER_VERTEX ||
3402 nir->info.stage == MESA_SHADER_TESS_EVAL)
3403 NIR_PASS(lowered, nir, nir_lower_clip_vs,
3404 (1 << info->io.genUserClip) - 1, true, false, NULL);
3405 else if (nir->info.stage == MESA_SHADER_GEOMETRY)
3406 NIR_PASS(lowered, nir, nir_lower_clip_gs,
3407 (1 << info->io.genUserClip) - 1, false, NULL);
3408
3409 if (lowered) {
3410 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
3411 NIR_PASS_V(nir, nir_lower_io_to_temporaries, impl, true, false);
3412 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
3413 NIR_PASS_V(nir, nv50_nir_lower_load_user_clip_plane, info);
3414 } else {
3415 info_out->io.genUserClip = -1;
3416 }
3417 }
3418
3419 /* prepare for IO lowering */
3420 NIR_PASS_V(nir, nir_lower_flrp, lower_flrp, false);
3421 NIR_PASS_V(nir, nir_opt_deref);
3422 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3423
3424 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3425 type_size, (nir_lower_io_options)0);
3426
3427 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3428
3429 struct nir_lower_tex_options tex_options = {};
3430 tex_options.lower_txp = ~0;
3431
3432 NIR_PASS_V(nir, nir_lower_tex, &tex_options);
3433
3434 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3435 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3436 NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
3437
3438 NIR_PASS_V(nir, nir_lower_frexp);
3439
3440    /* TODO: improve this lowering/optimisation loop so that we can use
3441 * nir_opt_idiv_const effectively before this.
3442 */
3443 nir_lower_idiv_options idiv_options = {
3444 .allow_fp16 = true,
3445 };
3446 NIR_PASS_V(nir, nir_lower_idiv, &idiv_options);
3447
3448 runOptLoop();
3449
3450 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3451 NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp,
3452 glsl_get_natural_size_align_bytes);
3453 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
3454 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3455
3456 NIR_PASS_V(nir, nir_opt_constant_folding); // Improves aliasing information
3457
3458 nir_load_store_vectorize_options vectorize_opts = {};
3459 vectorize_opts.modes = nir_var_mem_global |
3460 nir_var_mem_ssbo |
3461 nir_var_mem_shared |
3462 nir_var_shader_temp;
3463 vectorize_opts.callback = Converter::memVectorizeCb;
3464 vectorize_opts.cb_data = this;
3465 NIR_PASS_V(nir, nir_opt_load_store_vectorize, &vectorize_opts);
3466
3467 nir_lower_mem_access_bit_sizes_options mem_bit_sizes = {};
3468 mem_bit_sizes.modes = nir_var_mem_global |
3469 nir_var_mem_constant |
3470 nir_var_mem_ssbo |
3471 nir_var_mem_shared |
3472 nir_var_shader_temp | nir_var_function_temp;
3473 mem_bit_sizes.callback = Converter::getMemAccessSizeAlign;
3474 mem_bit_sizes.cb_data = this;
3475 NIR_PASS_V(nir, nir_lower_mem_access_bit_sizes, &mem_bit_sizes);
3476
3477 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3478 NIR_PASS_V(nir, nir_lower_pack);
3479
3480 runOptLoop();
3481
3482 NIR_PASS_V(nir, nir_opt_combine_barriers, NULL, NULL);
3483
3484 nir_move_options move_options =
3485 (nir_move_options)(nir_move_const_undef |
3486 nir_move_load_ubo |
3487 nir_move_load_uniform |
3488 nir_move_load_input);
3489 NIR_PASS_V(nir, nir_opt_sink, move_options);
3490 NIR_PASS_V(nir, nir_opt_move, move_options);
3491
3492 if (nir->info.stage == MESA_SHADER_FRAGMENT)
3493 NIR_PASS_V(nir, nv_nir_move_stores_to_end);
3494
3495 NIR_PASS_V(nir, nir_opt_algebraic_late);
3496
3497 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3498 NIR_PASS_V(nir, nir_lower_bit_size, Converter::lowerBitSizeCB, this);
3499
3500 NIR_PASS_V(nir, nir_divergence_analysis);
3501 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3502
3503 // Garbage collect dead instructions
3504 nir_sweep(nir);
3505
3506 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
3507
3508 if (!parseNIR()) {
3509 ERROR("Couldn't prase NIR!\n");
3510 return false;
3511 }
3512
3513 if (!assignSlots()) {
3514 ERROR("Couldn't assign slots!\n");
3515 return false;
3516 }
3517
3518 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3519 nir_print_shader(nir, stderr);
3520
3521 nir_foreach_function(function, nir) {
3522 if (!visit(function))
3523 return false;
3524 }
3525
3526 return true;
3527 }
3528
3529 } // unnamed namespace
3530
3531 namespace nv50_ir {
3532
3533 bool
3534 Program::makeFromNIR(struct nv50_ir_prog_info *info,
3535 struct nv50_ir_prog_info_out *info_out)
3536 {
3537 nir_shader *nir = info->bin.nir;
3538 Converter converter(this, nir, info, info_out);
3539 bool result = converter.run();
3540 if (!result)
3541 return result;
3542 LoweringHelper lowering;
3543 lowering.run(this);
3544 tlsSize = info_out->bin.tlsSpace;
3545 return result;
3546 }
3547
3548 } // namespace nv50_ir
3549
3550 static nir_shader_compiler_options
3551 nvir_nir_shader_compiler_options(int chipset, uint8_t shader_type)
3552 {
   nir_shader_compiler_options op = {};
   op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_ffma16 = false;
   op.lower_ffma32 = false;
   op.lower_ffma64 = false;
   op.fuse_ffma16 = false; /* nir doesn't track mad vs fma */
   op.fuse_ffma32 = false; /* nir doesn't track mad vs fma */
   op.fuse_ffma64 = false; /* nir doesn't track mad vs fma */
   op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_flrp32 = true;
   op.lower_flrp64 = true;
   op.lower_fpow = true;
   op.lower_fsat = false;
   op.lower_fsqrt = false; // TODO: only before gm200
   op.lower_sincos = false;
   op.lower_fmod = true;
   op.lower_bitfield_extract = (chipset >= NVISA_GV100_CHIPSET || chipset < NVISA_GF100_CHIPSET);
   op.lower_bitfield_insert = (chipset >= NVISA_GV100_CHIPSET || chipset < NVISA_GF100_CHIPSET);
   op.lower_bitfield_reverse = (chipset < NVISA_GF100_CHIPSET);
   op.lower_bit_count = (chipset < NVISA_GF100_CHIPSET);
   op.lower_ifind_msb = (chipset < NVISA_GF100_CHIPSET);
   op.lower_find_lsb = (chipset < NVISA_GF100_CHIPSET);
   op.lower_uadd_carry = true; // TODO
   op.lower_usub_borrow = true; // TODO
   op.lower_mul_high = false;
   op.lower_fneg = false;
   op.lower_ineg = false;
   op.lower_scmp = true; // TODO: not implemented yet
   op.lower_vector_cmp = false;
   op.lower_bitops = false;
   op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_fdph = false;
   op.lower_fdot = false;
   op.fdot_replicates = false; // TODO
   op.lower_ffloor = false; // TODO
   op.lower_ffract = true;
   op.lower_fceil = false; // TODO
   op.lower_ftrunc = false;
   op.lower_ldexp = true;
   op.lower_pack_half_2x16 = true;
   op.lower_pack_unorm_2x16 = true;
   op.lower_pack_snorm_2x16 = true;
   op.lower_pack_unorm_4x8 = true;
   op.lower_pack_snorm_4x8 = true;
   op.lower_unpack_half_2x16 = true;
   op.lower_unpack_unorm_2x16 = true;
   op.lower_unpack_snorm_2x16 = true;
   op.lower_unpack_unorm_4x8 = true;
   op.lower_unpack_snorm_4x8 = true;
   op.lower_pack_split = false;
   op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
   op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
   op.lower_insert_byte = true;
   op.lower_insert_word = true;
   op.lower_all_io_to_temps = false;
   op.lower_all_io_to_elements = false;
   op.vertex_id_zero_based = false;
   op.lower_base_vertex = false;
   op.lower_helper_invocation = false;
   op.optimize_sample_mask_in = false;
   op.lower_cs_local_index_to_id = true;
   op.lower_cs_local_id_to_index = false;
   op.lower_device_index_to_zero = true;
   op.lower_wpos_pntc = false; // TODO
   op.lower_hadd = true; // TODO
   op.lower_uadd_sat = true; // TODO
   op.lower_usub_sat = true; // TODO
   op.lower_iadd_sat = true; // TODO
   op.vectorize_io = false;
   op.lower_to_scalar = false;
   op.unify_interfaces = false;
   op.use_interpolated_input_intrinsics = true;
   op.lower_mul_2x32_64 = true; // TODO
   op.has_rotate32 = (chipset >= NVISA_GV100_CHIPSET);
   op.has_imul24 = false;
   op.has_fmulz = (chipset > NVISA_G80_CHIPSET);
   op.intel_vec4 = false;
   op.lower_uniforms_to_ubo = true;
   op.force_indirect_unrolling = (nir_variable_mode) (
      ((shader_type == PIPE_SHADER_FRAGMENT) ? nir_var_shader_out : 0) |
      /* HW doesn't support indirect addressing of fragment program inputs
       * on Volta. The binary driver generates a function that handles every
       * possible indirection and calls it indirectly instead.
       */
      ((chipset >= NVISA_GV100_CHIPSET && shader_type == PIPE_SHADER_FRAGMENT) ? nir_var_shader_in : 0)
   );
   op.force_indirect_unrolling_sampler = (chipset < NVISA_GF100_CHIPSET);
   op.max_unroll_iterations = 32;
   op.lower_int64_options = (nir_lower_int64_options) (
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
      nir_lower_divmod64 |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_bcsel64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
      nir_lower_imul_2x32_64 |
      ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
      nir_lower_ufind_msb64 |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_conv64 : 0)
   );
   op.lower_doubles_options = (nir_lower_doubles_options) (
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
      nir_lower_dmod |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
   );
   op.discard_is_demote = true;
   op.has_ddx_intrinsics = true;
   op.scalarize_ddx = true;
   return op;
}

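/* Pre-built option tables, one per supported chipset family, each with a
 * dedicated fragment-shader variant.
 */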
static const nir_shader_compiler_options g80_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_G80_CHIPSET, PIPE_SHADER_TYPES);
static const nir_shader_compiler_options g80_fs_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_G80_CHIPSET, PIPE_SHADER_FRAGMENT);
static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET, PIPE_SHADER_TYPES);
static const nir_shader_compiler_options gf100_fs_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET, PIPE_SHADER_FRAGMENT);
static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET, PIPE_SHADER_TYPES);
static const nir_shader_compiler_options gm107_fs_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET, PIPE_SHADER_FRAGMENT);
static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET, PIPE_SHADER_TYPES);
static const nir_shader_compiler_options gv100_fs_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET, PIPE_SHADER_FRAGMENT);

const nir_shader_compiler_options *
nv50_ir_nir_shader_compiler_options(int chipset, uint8_t shader_type)
{
   if (chipset >= NVISA_GV100_CHIPSET) {
      if (shader_type == PIPE_SHADER_FRAGMENT) {
         return &gv100_fs_nir_shader_compiler_options;
      } else {
         return &gv100_nir_shader_compiler_options;
      }
   }

   if (chipset >= NVISA_GM107_CHIPSET) {
      if (shader_type == PIPE_SHADER_FRAGMENT) {
         return &gm107_fs_nir_shader_compiler_options;
      } else {
         return &gm107_nir_shader_compiler_options;
      }
   }

   if (chipset >= NVISA_GF100_CHIPSET) {
      if (shader_type == PIPE_SHADER_FRAGMENT) {
         return &gf100_fs_nir_shader_compiler_options;
      } else {
         return &gf100_nir_shader_compiler_options;
      }
   }

   if (shader_type == PIPE_SHADER_FRAGMENT) {
      return &g80_fs_nir_shader_compiler_options;
   } else {
      return &g80_nir_shader_compiler_options;
   }
}

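/* Usage sketch: a driver querying per-stage compiler options would typically
 * do something along these lines (illustrative only; the exact call site is
 * up to the driver):
 *
 *    const nir_shader_compiler_options *opts =
 *       nv50_ir_nir_shader_compiler_options(chipset, PIPE_SHADER_FRAGMENT);
 *
 * where `chipset` is the NVISA_*_CHIPSET family value; every non-fragment
 * stage shares the generic PIPE_SHADER_TYPES variant selected above.
 */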