/*
 * Copyright © 2013 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#ifndef INSTR_A3XX_H_
#define INSTR_A3XX_H_

#define PACKED __attribute__((__packed__))

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* clang-format off */
void ir3_assert_handler(const char *expr, const char *file, int line,
                        const char *func) __attribute__((weak)) __attribute__((__noreturn__));
/* clang-format on */

/* A wrapper for assert() that allows overriding the handling of a failed
 * assert. This is needed for tools like crashdec, which may want to
 * attempt to disassemble memory that does not actually contain valid
 * instructions.
 */
#define ir3_assert(expr)                                                     \
   do {                                                                      \
      if (!(expr)) {                                                         \
         if (ir3_assert_handler) {                                           \
            ir3_assert_handler(#expr, __FILE__, __LINE__, __func__);         \
         }                                                                   \
         assert(expr);                                                       \
      }                                                                      \
   } while (0)
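
/* Illustrative sketch (hypothetical, not part of this header): a tool that
 * wants to keep going past bad instruction words can provide a strong
 * definition of the weak handler declared above, e.g. to longjmp back into
 * its disassembly loop instead of aborting. The jmp_buf and message format
 * here are made up for the example:
 *
 *   #include <setjmp.h>
 *
 *   static jmp_buf disasm_jmp_buf;   // hypothetical recovery point
 *
 *   void
 *   ir3_assert_handler(const char *expr, const char *file, int line,
 *                      const char *func)
 *   {
 *      fprintf(stderr, "%s:%d (%s): assert failed: %s\n",
 *              file, line, func, expr);
 *      longjmp(disasm_jmp_buf, 1);   // never returns, matching noreturn
 *   }
 */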

/* size of the largest OPC field across all the instruction categories: */
#define NOPC_BITS 7

#define _OPC(cat, opc) (((cat) << NOPC_BITS) | (opc))
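
/* For example (illustrative): _OPC(3, 7) packs category 3 and sub-opcode 7
 * as (3 << 7) | 7 == 0x187, which is the value of OPC_MAD_F32 below.
 */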

/* clang-format off */
typedef enum {
   /* category 0: */
   OPC_NOP = _OPC(0, 0),
   OPC_JUMP = _OPC(0, 2),
   OPC_CALL = _OPC(0, 3),
   OPC_RET = _OPC(0, 4),
   OPC_KILL = _OPC(0, 5),
   OPC_END = _OPC(0, 6),
   OPC_EMIT = _OPC(0, 7),
   OPC_CUT = _OPC(0, 8),
   OPC_CHMASK = _OPC(0, 9),
   OPC_CHSH = _OPC(0, 10),
   OPC_FLOW_REV = _OPC(0, 11),

   OPC_BKT = _OPC(0, 16),
   OPC_STKS = _OPC(0, 17),
   OPC_STKR = _OPC(0, 18),
   OPC_XSET = _OPC(0, 19),
   OPC_XCLR = _OPC(0, 20),
   OPC_GETONE = _OPC(0, 21),
   OPC_DBG = _OPC(0, 22),
   OPC_SHPS = _OPC(0, 23), /* shader prologue start */
   OPC_SHPE = _OPC(0, 24), /* shader prologue end */
   OPC_GETLAST = _OPC(0, 25),

   OPC_PREDT = _OPC(0, 29), /* predicated true */
   OPC_PREDF = _OPC(0, 30), /* predicated false */
   OPC_PREDE = _OPC(0, 31), /* predicated end */

   /* Logical opcodes for different branch instruction variations: */
   OPC_BR = _OPC(0, 40),
   OPC_BRAO = _OPC(0, 41),
   OPC_BRAA = _OPC(0, 42),
   OPC_BRAC = _OPC(0, 43),
   OPC_BANY = _OPC(0, 44),
   OPC_BALL = _OPC(0, 45),
   OPC_BRAX = _OPC(0, 46),

   /* Logical opcode to distinguish kill and demote */
   OPC_DEMOTE = _OPC(0, 47),

   /* category 1: */
   OPC_MOV = _OPC(1, 0),
   OPC_MOVP = _OPC(1, 1),
   /* swz, gat, sct */
   OPC_MOVMSK = _OPC(1, 3),

   /* Virtual opcodes for instructions differentiated via a "sub-opcode" that
    * replaces the repeat field:
    */
   OPC_SWZ = _OPC(1, 4),
   OPC_GAT = _OPC(1, 5),
   OPC_SCT = _OPC(1, 6),

   /* Logical opcodes for different variants of mov: */
   OPC_MOV_IMMED = _OPC(1, 40),
   OPC_MOV_CONST = _OPC(1, 41),
   OPC_MOV_GPR = _OPC(1, 42),
   OPC_MOV_RELGPR = _OPC(1, 43),
   OPC_MOV_RELCONST = _OPC(1, 44),

   /* Macros that expand to an if statement + move */
   OPC_BALLOT_MACRO = _OPC(1, 50),
   OPC_ANY_MACRO = _OPC(1, 51),
   OPC_ALL_MACRO = _OPC(1, 52),
   OPC_ELECT_MACRO = _OPC(1, 53),
   OPC_READ_COND_MACRO = _OPC(1, 54),
   OPC_READ_FIRST_MACRO = _OPC(1, 55),
   OPC_SHPS_MACRO = _OPC(1, 56),

   /* Macros that expand to a loop */
   OPC_SCAN_MACRO = _OPC(1, 58),
   OPC_SCAN_CLUSTERS_MACRO = _OPC(1, 60),

   /* category 2: */
   OPC_ADD_F = _OPC(2, 0),
   OPC_MIN_F = _OPC(2, 1),
   OPC_MAX_F = _OPC(2, 2),
   OPC_MUL_F = _OPC(2, 3),
   OPC_SIGN_F = _OPC(2, 4),
   OPC_CMPS_F = _OPC(2, 5),
   OPC_ABSNEG_F = _OPC(2, 6),
   OPC_CMPV_F = _OPC(2, 7),
   /* 8 - invalid */
   OPC_FLOOR_F = _OPC(2, 9),
   OPC_CEIL_F = _OPC(2, 10),
   OPC_RNDNE_F = _OPC(2, 11),
   OPC_RNDAZ_F = _OPC(2, 12),
   OPC_TRUNC_F = _OPC(2, 13),
   /* 14-15 - invalid */
   OPC_ADD_U = _OPC(2, 16),
   OPC_ADD_S = _OPC(2, 17),
   OPC_SUB_U = _OPC(2, 18),
   OPC_SUB_S = _OPC(2, 19),
   OPC_CMPS_U = _OPC(2, 20),
   OPC_CMPS_S = _OPC(2, 21),
   OPC_MIN_U = _OPC(2, 22),
   OPC_MIN_S = _OPC(2, 23),
   OPC_MAX_U = _OPC(2, 24),
   OPC_MAX_S = _OPC(2, 25),
   OPC_ABSNEG_S = _OPC(2, 26),
   /* 27 - invalid */
   OPC_AND_B = _OPC(2, 28),
   OPC_OR_B = _OPC(2, 29),
   OPC_NOT_B = _OPC(2, 30),
   OPC_XOR_B = _OPC(2, 31),
   /* 32 - invalid */
   OPC_CMPV_U = _OPC(2, 33),
   OPC_CMPV_S = _OPC(2, 34),
   /* 35-47 - invalid */
   OPC_MUL_U24 = _OPC(2, 48), /* 24b mul into 32b result */
   OPC_MUL_S24 = _OPC(2, 49), /* 24b mul into 32b result with sign extension */
   OPC_MULL_U = _OPC(2, 50),
   OPC_BFREV_B = _OPC(2, 51),
   OPC_CLZ_S = _OPC(2, 52),
   OPC_CLZ_B = _OPC(2, 53),
   OPC_SHL_B = _OPC(2, 54),
   OPC_SHR_B = _OPC(2, 55),
   OPC_ASHR_B = _OPC(2, 56),
   OPC_BARY_F = _OPC(2, 57),
   OPC_MGEN_B = _OPC(2, 58),
   OPC_GETBIT_B = _OPC(2, 59),
   OPC_SETRM = _OPC(2, 60),
   OPC_CBITS_B = _OPC(2, 61),
   OPC_SHB = _OPC(2, 62),
   OPC_MSAD = _OPC(2, 63),
   OPC_FLAT_B = _OPC(2, 64),

   /* category 3: */
   OPC_MAD_U16 = _OPC(3, 0),
   OPC_MADSH_U16 = _OPC(3, 1),
   OPC_MAD_S16 = _OPC(3, 2),
   OPC_MADSH_M16 = _OPC(3, 3), /* should this be .s16? */
   OPC_MAD_U24 = _OPC(3, 4),
   OPC_MAD_S24 = _OPC(3, 5),
   OPC_MAD_F16 = _OPC(3, 6),
   OPC_MAD_F32 = _OPC(3, 7),
   OPC_SEL_B16 = _OPC(3, 8),
   OPC_SEL_B32 = _OPC(3, 9),
   OPC_SEL_S16 = _OPC(3, 10),
   OPC_SEL_S32 = _OPC(3, 11),
   OPC_SEL_F16 = _OPC(3, 12),
   OPC_SEL_F32 = _OPC(3, 13),
   OPC_SAD_S16 = _OPC(3, 14),
   OPC_SAD_S32 = _OPC(3, 15),
   OPC_SHRM = _OPC(3, 16),
   OPC_SHLM = _OPC(3, 17),
   OPC_SHRG = _OPC(3, 18),
   OPC_SHLG = _OPC(3, 19),
   OPC_ANDG = _OPC(3, 20),
   OPC_DP2ACC = _OPC(3, 21),
   OPC_DP4ACC = _OPC(3, 22),
   OPC_WMM = _OPC(3, 23),
   OPC_WMM_ACCU = _OPC(3, 24),

   /* category 4: */
   OPC_RCP = _OPC(4, 0),
   OPC_RSQ = _OPC(4, 1),
   OPC_LOG2 = _OPC(4, 2),
   OPC_EXP2 = _OPC(4, 3),
   OPC_SIN = _OPC(4, 4),
   OPC_COS = _OPC(4, 5),
   OPC_SQRT = _OPC(4, 6),
   /* NOTE that these are 8+opc from their highp equivalents, so it's
    * possible that the high-order bit in the opc field has been repurposed
    * for half-precision use? But note that other ops (rcp/sin/cos/sqrt)
    * still use the same opc as highp.
    */
   OPC_HRSQ = _OPC(4, 9),
   OPC_HLOG2 = _OPC(4, 10),
   OPC_HEXP2 = _OPC(4, 11),

   /* category 5: */
   OPC_ISAM = _OPC(5, 0),
   OPC_ISAML = _OPC(5, 1),
   OPC_ISAMM = _OPC(5, 2),
   OPC_SAM = _OPC(5, 3),
   OPC_SAMB = _OPC(5, 4),
   OPC_SAML = _OPC(5, 5),
   OPC_SAMGQ = _OPC(5, 6),
   OPC_GETLOD = _OPC(5, 7),
   OPC_CONV = _OPC(5, 8),
   OPC_CONVM = _OPC(5, 9),
   OPC_GETSIZE = _OPC(5, 10),
   OPC_GETBUF = _OPC(5, 11),
   OPC_GETPOS = _OPC(5, 12),
   OPC_GETINFO = _OPC(5, 13),
   OPC_DSX = _OPC(5, 14),
   OPC_DSY = _OPC(5, 15),
   OPC_GATHER4R = _OPC(5, 16),
   OPC_GATHER4G = _OPC(5, 17),
   OPC_GATHER4B = _OPC(5, 18),
   OPC_GATHER4A = _OPC(5, 19),
   OPC_SAMGP0 = _OPC(5, 20),
   OPC_SAMGP1 = _OPC(5, 21),
   OPC_SAMGP2 = _OPC(5, 22),
   OPC_SAMGP3 = _OPC(5, 23),
   OPC_DSXPP_1 = _OPC(5, 24),
   OPC_DSYPP_1 = _OPC(5, 25),
   OPC_RGETPOS = _OPC(5, 26),
   OPC_RGETINFO = _OPC(5, 27),
   OPC_BRCST_ACTIVE = _OPC(5, 28),
   OPC_QUAD_SHUFFLE_BRCST = _OPC(5, 29),
   OPC_QUAD_SHUFFLE_HORIZ = _OPC(5, 30),
   OPC_QUAD_SHUFFLE_VERT = _OPC(5, 31),
   OPC_QUAD_SHUFFLE_DIAG = _OPC(5, 32),
   OPC_TCINV = _OPC(5, 33),
   /* cat5 meta instructions, placed above the cat5 opc field's size */
   OPC_DSXPP_MACRO = _OPC(5, 35),
   OPC_DSYPP_MACRO = _OPC(5, 36),

   /* category 6: */
   OPC_LDG = _OPC(6, 0), /* load-global */
   OPC_LDL = _OPC(6, 1),
   OPC_LDP = _OPC(6, 2),
   OPC_STG = _OPC(6, 3), /* store-global */
   OPC_STL = _OPC(6, 4),
   OPC_STP = _OPC(6, 5),
   OPC_LDIB = _OPC(6, 6),
   OPC_G2L = _OPC(6, 7),
   OPC_L2G = _OPC(6, 8),
   OPC_PREFETCH = _OPC(6, 9),
   OPC_LDLW = _OPC(6, 10),
   OPC_STLW = _OPC(6, 11),
   OPC_RESFMT = _OPC(6, 14),
   OPC_RESINFO = _OPC(6, 15),
   OPC_ATOMIC_ADD = _OPC(6, 16),
   OPC_ATOMIC_SUB = _OPC(6, 17),
   OPC_ATOMIC_XCHG = _OPC(6, 18),
   OPC_ATOMIC_INC = _OPC(6, 19),
   OPC_ATOMIC_DEC = _OPC(6, 20),
   OPC_ATOMIC_CMPXCHG = _OPC(6, 21),
   OPC_ATOMIC_MIN = _OPC(6, 22),
   OPC_ATOMIC_MAX = _OPC(6, 23),
   OPC_ATOMIC_AND = _OPC(6, 24),
   OPC_ATOMIC_OR = _OPC(6, 25),
   OPC_ATOMIC_XOR = _OPC(6, 26),
   OPC_LDGB = _OPC(6, 27),
   OPC_STGB = _OPC(6, 28),
   OPC_STIB = _OPC(6, 29),
   OPC_LDC = _OPC(6, 30),
   OPC_LDLV = _OPC(6, 31),
   OPC_PIPR = _OPC(6, 32), /* ??? */
   OPC_PIPC = _OPC(6, 33), /* ??? */
   OPC_EMIT2 = _OPC(6, 34), /* ??? */
   OPC_ENDLS = _OPC(6, 35), /* ??? */
   OPC_GETSPID = _OPC(6, 36), /* SP ID */
   OPC_GETWID = _OPC(6, 37), /* wavefront ID */
   OPC_GETFIBERID = _OPC(6, 38), /* fiber ID */

   /* Logical opcodes for things that differ in a6xx+ */
   OPC_STC = _OPC(6, 40),
   OPC_RESINFO_B = _OPC(6, 41),
   OPC_LDIB_B = _OPC(6, 42),
   OPC_STIB_B = _OPC(6, 43),

   /* Logical opcodes for different atomic instruction variations: */
   OPC_ATOMIC_B_ADD = _OPC(6, 44),
   OPC_ATOMIC_B_SUB = _OPC(6, 45),
   OPC_ATOMIC_B_XCHG = _OPC(6, 46),
   OPC_ATOMIC_B_INC = _OPC(6, 47),
   OPC_ATOMIC_B_DEC = _OPC(6, 48),
   OPC_ATOMIC_B_CMPXCHG = _OPC(6, 49),
   OPC_ATOMIC_B_MIN = _OPC(6, 50),
   OPC_ATOMIC_B_MAX = _OPC(6, 51),
   OPC_ATOMIC_B_AND = _OPC(6, 52),
   OPC_ATOMIC_B_OR = _OPC(6, 53),
   OPC_ATOMIC_B_XOR = _OPC(6, 54),

   OPC_ATOMIC_S_ADD = _OPC(6, 55),
   OPC_ATOMIC_S_SUB = _OPC(6, 56),
   OPC_ATOMIC_S_XCHG = _OPC(6, 57),
   OPC_ATOMIC_S_INC = _OPC(6, 58),
   OPC_ATOMIC_S_DEC = _OPC(6, 59),
   OPC_ATOMIC_S_CMPXCHG = _OPC(6, 60),
   OPC_ATOMIC_S_MIN = _OPC(6, 61),
   OPC_ATOMIC_S_MAX = _OPC(6, 62),
   OPC_ATOMIC_S_AND = _OPC(6, 63),
   OPC_ATOMIC_S_OR = _OPC(6, 64),
   OPC_ATOMIC_S_XOR = _OPC(6, 65),

   OPC_ATOMIC_G_ADD = _OPC(6, 66),
   OPC_ATOMIC_G_SUB = _OPC(6, 67),
   OPC_ATOMIC_G_XCHG = _OPC(6, 68),
   OPC_ATOMIC_G_INC = _OPC(6, 69),
   OPC_ATOMIC_G_DEC = _OPC(6, 70),
   OPC_ATOMIC_G_CMPXCHG = _OPC(6, 71),
   OPC_ATOMIC_G_MIN = _OPC(6, 72),
   OPC_ATOMIC_G_MAX = _OPC(6, 73),
   OPC_ATOMIC_G_AND = _OPC(6, 74),
   OPC_ATOMIC_G_OR = _OPC(6, 75),
   OPC_ATOMIC_G_XOR = _OPC(6, 76),

   OPC_LDG_A = _OPC(6, 77),
   OPC_STG_A = _OPC(6, 78),

   OPC_SPILL_MACRO = _OPC(6, 79),
   OPC_RELOAD_MACRO = _OPC(6, 80),

   OPC_LDC_K = _OPC(6, 81),
   OPC_STSC = _OPC(6, 82),
   OPC_LDG_K = _OPC(6, 83),

   /* Macro that expands to an stsc at the start of the preamble. It loads
    * into the const file and should not be optimized in any way.
    */
   OPC_PUSH_CONSTS_LOAD_MACRO = _OPC(6, 84),

   /* category 7: */
   OPC_BAR = _OPC(7, 0),
   OPC_FENCE = _OPC(7, 1),
   OPC_SLEEP = _OPC(7, 2),
   OPC_ICINV = _OPC(7, 3),
   OPC_DCCLN = _OPC(7, 4),
   OPC_DCINV = _OPC(7, 5),
   OPC_DCFLU = _OPC(7, 6),

   OPC_LOCK = _OPC(7, 7),
   OPC_UNLOCK = _OPC(7, 8),

   OPC_ALIAS = _OPC(7, 9),

   OPC_CCINV = _OPC(7, 10),

   /* meta instructions (category 8): */
#define OPC_META 8
   /* placeholder instr to mark shader inputs: */
   OPC_META_INPUT = _OPC(OPC_META, 0),
   /* The "collect" and "split" instructions are used for keeping
    * track of instructions that write to multiple dst registers
    * (split) like texture sample instructions, or read multiple
    * consecutive scalar registers (collect) (bary.f, texture samp)
    *
    * A "split" extracts a scalar component from a vecN, and a
    * "collect" gathers multiple scalar components into a vecN
    */
   OPC_META_SPLIT = _OPC(OPC_META, 2),
   OPC_META_COLLECT = _OPC(OPC_META, 3),

   /* placeholder for texture fetches that run before FS invocation
    * starts:
    */
   OPC_META_TEX_PREFETCH = _OPC(OPC_META, 4),

   /* Parallel copies have multiple destinations, and copy each destination
    * to its corresponding source. This happens "in parallel," meaning that
    * it happens as-if every source is read first and then every destination
    * is stored. These are produced in RA when register shuffling is
    * required, and then lowered away immediately afterwards.
    */
   OPC_META_PARALLEL_COPY = _OPC(OPC_META, 5),
   OPC_META_PHI = _OPC(OPC_META, 6),
   /*
    * A manually encoded opcode
    */
   OPC_META_RAW = _OPC(OPC_META, 7),
} opc_t;
/* clang-format on */

#define opc_cat(opc) ((int)((opc) >> NOPC_BITS))
#define opc_op(opc) ((unsigned)((opc) & ((1 << NOPC_BITS) - 1)))
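
/* Round-trip example (illustrative): opc_cat()/opc_op() undo _OPC(), so for
 * any packed opcode the category and sub-opcode can be recovered:
 *
 *   opc_t opc = OPC_MAD_F32;   // _OPC(3, 7)
 *   assert(opc_cat(opc) == 3);
 *   assert(opc_op(opc) == 7);
 */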

const char *disasm_a3xx_instr_name(opc_t opc);

typedef enum {
   TYPE_F16 = 0,
   TYPE_F32 = 1,
   TYPE_U16 = 2,
   TYPE_U32 = 3,
   TYPE_S16 = 4,
   TYPE_S32 = 5,
   TYPE_U8 = 6,
   TYPE_U8_32 = 7,
} type_t;

static inline uint32_t
type_size(type_t type)
{
   switch (type) {
   case TYPE_F32:
   case TYPE_U32:
   case TYPE_U8_32:
   case TYPE_S32:
      return 32;
   case TYPE_F16:
   case TYPE_U16:
   case TYPE_S16:
      return 16;
   case TYPE_U8:
      return 8;
   default:
      ir3_assert(0); /* invalid type */
      return 0;
   }
}
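
/* Note (illustrative): TYPE_U8_32 reports a 32-bit size, presumably an
 * 8-bit value occupying a full 32-bit register, so:
 *
 *   assert(type_size(TYPE_U8_32) == 32);
 *   assert(type_size(TYPE_U8) == 8);
 */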

static inline type_t
type_uint_size(unsigned bit_size)
{
   switch (bit_size) {
   case 8:  return TYPE_U8;
   case 1:  /* 1b bools are treated as normal half-regs */
   case 16: return TYPE_U16;
   case 32: return TYPE_U32;
   default:
      ir3_assert(0); /* invalid size */
      return (type_t)0;
   }
}
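
/* E.g. (illustrative): since 1-bit bools are treated as normal half-regs,
 * type_uint_size(1) == TYPE_U16, the same as type_uint_size(16).
 */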

static inline type_t
type_float_size(unsigned bit_size)
{
   switch (bit_size) {
   case 16: return TYPE_F16;
   case 32: return TYPE_F32;
   default:
      ir3_assert(0); /* invalid size */
      return (type_t)0;
   }
}

static inline int
type_float(type_t type)
{
   return (type == TYPE_F32) || (type == TYPE_F16);
}

static inline int
type_uint(type_t type)
{
   return (type == TYPE_U32) || (type == TYPE_U16) || (type == TYPE_U8) ||
          (type == TYPE_U8_32);
}

static inline int
type_sint(type_t type)
{
   return (type == TYPE_S32) || (type == TYPE_S16);
}

typedef enum {
   ROUND_ZERO = 0,
   ROUND_EVEN = 1,
   ROUND_POS_INF = 2,
   ROUND_NEG_INF = 3,
} round_t;

/* comp:
 * 0 - x
 * 1 - y
 * 2 - z
 * 3 - w
 */
static inline uint32_t
regid(int num, int comp)
{
   return (num << 2) | (comp & 0x3);
}
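
/* Encoding example (illustrative): r2.w is regid(2, 3) == (2 << 2) | 3
 * == 0xb, i.e. the register number lives in the upper bits and the
 * component in the low two bits. The predicate register p0.x defined
 * below is regid(62, 0) == 0xf8.
 */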

#define INVALID_REG regid(63, 0)
#define VALIDREG(r) ((r) != INVALID_REG)
#define CONDREG(r, val) COND(VALIDREG(r), (val))

/* special registers: */
#define REG_A0 61 /* address register */
#define REG_P0 62 /* predicate register */
#define REG_P0_X regid(REG_P0, 0) /* p0.x */

#define INVALID_CONST_REG UINT16_MAX

/* With is_bindless_s2en = 1, this determines whether bindless is enabled and
 * if so, how to get the (base, index) pair for both sampler and texture.
 * There is a single base embedded in the instruction, which is always used
 * for the texture.
 */
typedef enum {
   /* Use traditional GL binding model, get texture and sampler index from src3
    * which is presumed to be uniform on a4xx+ (a3xx doesn't have the other
    * modes, but does handle non-uniform indexing).
    */
   CAT5_UNIFORM = 0,

   /* The sampler base comes from the low 3 bits of a1.x, and the sampler
    * and texture index come from src3 which is presumed to be uniform.
    */
   CAT5_BINDLESS_A1_UNIFORM = 1,

   /* The texture and sampler share the same base, and the sampler and
    * texture index come from src3 which is *not* presumed to be uniform.
    */
   CAT5_BINDLESS_NONUNIFORM = 2,

   /* The sampler base comes from the low 3 bits of a1.x, and the sampler
    * and texture index come from src3 which is *not* presumed to be
    * uniform.
    */
   CAT5_BINDLESS_A1_NONUNIFORM = 3,

   /* Use traditional GL binding model, get texture and sampler index
    * from src3 which is *not* presumed to be uniform.
    */
   CAT5_NONUNIFORM = 4,

   /* The texture and sampler share the same base, and the sampler and
    * texture index come from src3 which is presumed to be uniform.
    */
   CAT5_BINDLESS_UNIFORM = 5,

   /* The texture and sampler share the same base, get sampler index from low
    * 4 bits of src3 and texture index from high 4 bits.
    */
   CAT5_BINDLESS_IMM = 6,

   /* The sampler base comes from the low 3 bits of a1.x, and the texture
    * index comes from the next 8 bits of a1.x. The sampler index is an
    * immediate in src3.
    */
   CAT5_BINDLESS_A1_IMM = 7,
} cat5_desc_mode_t;
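
/* A minimal sketch (hypothetical helper, not part of this header) of how
 * the four register-indexed modes above relate to bindless and uniform
 * flags; the immediate and a1.x-based variants are ignored here:
 *
 *   static inline cat5_desc_mode_t
 *   pick_cat5_desc_mode(bool bindless, bool uniform)
 *   {
 *      if (bindless)
 *         return uniform ? CAT5_BINDLESS_UNIFORM : CAT5_BINDLESS_NONUNIFORM;
 *      return uniform ? CAT5_UNIFORM : CAT5_NONUNIFORM;
 *   }
 */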

/* Similar to cat5_desc_mode_t, describes how the descriptor is loaded.
 */
typedef enum {
   /* Use old GL binding model with an immediate index. */
   CAT6_IMM = 0,

   CAT6_UNIFORM = 1,

   CAT6_NONUNIFORM = 2,

   /* Use the bindless model, with an immediate index.
    */
   CAT6_BINDLESS_IMM = 4,

   /* Use the bindless model, with a uniform register index.
    */
   CAT6_BINDLESS_UNIFORM = 5,

   /* Use the bindless model, with a register index that isn't guaranteed
    * to be uniform. This presumably checks if the indices are equal and
    * splits up the load/store, because it works the way you would
    * expect.
    */
   CAT6_BINDLESS_NONUNIFORM = 6,
} cat6_desc_mode_t;

static inline bool
is_sat_compatible(opc_t opc)
{
   /* On a6xx saturation doesn't work on cat4 */
   if (opc_cat(opc) != 2 && opc_cat(opc) != 3)
      return false;

   switch (opc) {
   /* On a3xx and a6xx saturation doesn't work on bary.f/flat.b */
   case OPC_BARY_F:
   case OPC_FLAT_B:
   /* On a6xx saturation doesn't work on sel.* */
   case OPC_SEL_B16:
   case OPC_SEL_B32:
   case OPC_SEL_S16:
   case OPC_SEL_S32:
   case OPC_SEL_F16:
   case OPC_SEL_F32:
      return false;
   default:
      return true;
   }
}

static inline bool
is_mad(opc_t opc)
{
   switch (opc) {
   case OPC_MAD_U16:
   case OPC_MAD_S16:
   case OPC_MAD_U24:
   case OPC_MAD_S24:
   case OPC_MAD_F16:
   case OPC_MAD_F32:
      return true;
   default:
      return false;
   }
}

static inline bool
is_madsh(opc_t opc)
{
   switch (opc) {
   case OPC_MADSH_U16:
   case OPC_MADSH_M16:
      return true;
   default:
      return false;
   }
}

static inline bool
is_local_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_ADD:
   case OPC_ATOMIC_SUB:
   case OPC_ATOMIC_XCHG:
   case OPC_ATOMIC_INC:
   case OPC_ATOMIC_DEC:
   case OPC_ATOMIC_CMPXCHG:
   case OPC_ATOMIC_MIN:
   case OPC_ATOMIC_MAX:
   case OPC_ATOMIC_AND:
   case OPC_ATOMIC_OR:
   case OPC_ATOMIC_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_global_a3xx_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_S_ADD:
   case OPC_ATOMIC_S_SUB:
   case OPC_ATOMIC_S_XCHG:
   case OPC_ATOMIC_S_INC:
   case OPC_ATOMIC_S_DEC:
   case OPC_ATOMIC_S_CMPXCHG:
   case OPC_ATOMIC_S_MIN:
   case OPC_ATOMIC_S_MAX:
   case OPC_ATOMIC_S_AND:
   case OPC_ATOMIC_S_OR:
   case OPC_ATOMIC_S_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_global_a6xx_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_G_ADD:
   case OPC_ATOMIC_G_SUB:
   case OPC_ATOMIC_G_XCHG:
   case OPC_ATOMIC_G_INC:
   case OPC_ATOMIC_G_DEC:
   case OPC_ATOMIC_G_CMPXCHG:
   case OPC_ATOMIC_G_MIN:
   case OPC_ATOMIC_G_MAX:
   case OPC_ATOMIC_G_AND:
   case OPC_ATOMIC_G_OR:
   case OPC_ATOMIC_G_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_bindless_atomic(opc_t opc)
{
   switch (opc) {
   case OPC_ATOMIC_B_ADD:
   case OPC_ATOMIC_B_SUB:
   case OPC_ATOMIC_B_XCHG:
   case OPC_ATOMIC_B_INC:
   case OPC_ATOMIC_B_DEC:
   case OPC_ATOMIC_B_CMPXCHG:
   case OPC_ATOMIC_B_MIN:
   case OPC_ATOMIC_B_MAX:
   case OPC_ATOMIC_B_AND:
   case OPC_ATOMIC_B_OR:
   case OPC_ATOMIC_B_XOR:
      return true;
   default:
      return false;
   }
}

static inline bool
is_atomic(opc_t opc)
{
   return is_local_atomic(opc) || is_global_a3xx_atomic(opc) ||
          is_global_a6xx_atomic(opc) || is_bindless_atomic(opc);
}

static inline bool
is_ssbo(opc_t opc)
{
   switch (opc) {
   case OPC_RESFMT:
   case OPC_RESINFO:
   case OPC_LDGB:
   case OPC_STGB:
   case OPC_STIB:
      return true;
   default:
      return false;
   }
}

static inline bool
is_isam(opc_t opc)
{
   switch (opc) {
   case OPC_ISAM:
   case OPC_ISAML:
   case OPC_ISAMM:
      return true;
   default:
      return false;
   }
}

static inline bool
is_cat2_float(opc_t opc)
{
   switch (opc) {
   case OPC_ADD_F:
   case OPC_MIN_F:
   case OPC_MAX_F:
   case OPC_MUL_F:
   case OPC_SIGN_F:
   case OPC_CMPS_F:
   case OPC_ABSNEG_F:
   case OPC_CMPV_F:
   case OPC_FLOOR_F:
   case OPC_CEIL_F:
   case OPC_RNDNE_F:
   case OPC_RNDAZ_F:
   case OPC_TRUNC_F:
      return true;

   default:
      return false;
   }
}

static inline bool
is_cat3_float(opc_t opc)
{
   switch (opc) {
   case OPC_MAD_F16:
   case OPC_MAD_F32:
   case OPC_SEL_F16:
   case OPC_SEL_F32:
      return true;
   default:
      return false;
   }
}

#endif /* INSTR_A3XX_H_ */