/*
 * Copyright 2009 Nicolai Haehnle.
 * SPDX-License-Identifier: MIT
 */

#ifndef RADEON_OPCODES_H
#define RADEON_OPCODES_H

#include <assert.h>

/**
 * Opcodes understood by the Radeon compiler.
 */
typedef enum {
    RC_OPCODE_NOP = 0,
    RC_OPCODE_ILLEGAL_OPCODE,

    /** vec4 instruction: dst.c = src0.c + src1.c; */
    RC_OPCODE_ADD,

    /** special instruction: load address register
     * dst.x = floor(src.x), where dst must be an address register */
    RC_OPCODE_ARL,

    /** special instruction: load address register with round
     * dst.x = round(src.x), where dst must be an address register */
    RC_OPCODE_ARR,

    /** vec4 instruction: dst.c = src0.c < 0.0 ? src1.c : src2.c */
    RC_OPCODE_CMP,

    /** vec4 instruction: dst.c = src2.c > 0.5 ? src0.c : src1.c */
    RC_OPCODE_CND,

    /** scalar instruction: dst = cos(src0.x) */
    RC_OPCODE_COS,

    /** special instruction: take vec4 partial derivative in X direction
     * dst.c = d src0.c / dx */
    RC_OPCODE_DDX,

    /** special instruction: take vec4 partial derivative in Y direction
     * dst.c = d src0.c / dy */
    RC_OPCODE_DDY,

    /** scalar instruction: dst = src0.x*src1.x + src0.y*src1.y */
    RC_OPCODE_DP2,

    /** scalar instruction: dst = src0.x*src1.x + src0.y*src1.y + src0.z*src1.z */
    RC_OPCODE_DP3,

    /** scalar instruction: dst = src0.x*src1.x + src0.y*src1.y + src0.z*src1.z + src0.w*src1.w */
    RC_OPCODE_DP4,

    /** special instruction, see ARB_fragment_program */
    RC_OPCODE_DST,

    /** scalar instruction: dst = 2**src0.x */
    RC_OPCODE_EX2,

    /** special instruction, see ARB_vertex_program */
    RC_OPCODE_EXP,

    /** vec4 instruction: dst.c = src0.c - floor(src0.c) */
    RC_OPCODE_FRC,

    /** special instruction: stop execution if any component of src0 is negative */
    RC_OPCODE_KIL,

    /** scalar instruction: dst = log_2(src0.x) */
    RC_OPCODE_LG2,

    /** special instruction, see ARB_vertex_program */
    RC_OPCODE_LIT,

    /** special instruction, see ARB_vertex_program */
    RC_OPCODE_LOG,

    /** vec4 instruction: dst.c = src0.c*src1.c + src2.c */
    RC_OPCODE_MAD,

    /** vec4 instruction: dst.c = max(src0.c, src1.c) */
    RC_OPCODE_MAX,

    /** vec4 instruction: dst.c = min(src0.c, src1.c) */
    RC_OPCODE_MIN,

    /** vec4 instruction: dst.c = src0.c */
    RC_OPCODE_MOV,

    /** vec4 instruction: dst.c = src0.c*src1.c */
    RC_OPCODE_MUL,

    /** scalar instruction: dst = src0.x ** src1.x */
    RC_OPCODE_POW,

    /** scalar instruction: dst = 1 / src0.x */
    RC_OPCODE_RCP,

    /** vec4 instruction: dst.c = floor(src0.c + 0.5) */
    RC_OPCODE_ROUND,

    /** scalar instruction: dst = 1 / sqrt(src0.x) */
    RC_OPCODE_RSQ,

    /** vec4 instruction: dst.c = (src0.c == src1.c) ? 1.0 : 0.0 */
    RC_OPCODE_SEQ,

    /** vec4 instruction: dst.c = (src0.c >= src1.c) ? 1.0 : 0.0 */
    RC_OPCODE_SGE,

    /** scalar instruction: dst = sin(src0.x) */
    RC_OPCODE_SIN,

    /** vec4 instruction: dst.c = (src0.c < src1.c) ? 1.0 : 0.0 */
    RC_OPCODE_SLT,

    /** vec4 instruction: dst.c = (src0.c != src1.c) ? 1.0 : 0.0 */
    RC_OPCODE_SNE,

    /** texture instruction: plain texture lookup */
    RC_OPCODE_TEX,
    /** texture instruction: texture lookup with LOD bias */
    RC_OPCODE_TXB,
    /** texture instruction: texture lookup with explicit derivatives */
    RC_OPCODE_TXD,
    /** texture instruction: texture lookup with explicit LOD */
    RC_OPCODE_TXL,
    /** texture instruction: projective texture lookup (divide by w) */
    RC_OPCODE_TXP,

    /** branch instruction:
     * If src0.x != 0.0, continue with the next instruction;
     * otherwise, jump to matching RC_OPCODE_ELSE or RC_OPCODE_ENDIF.
     */
    RC_OPCODE_IF,

    /** branch instruction: jump to matching RC_OPCODE_ENDIF */
    RC_OPCODE_ELSE,

    /** branch instruction: has no effect */
    RC_OPCODE_ENDIF,
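
    /* Illustrative sketch (not part of the interface): per the semantics
     * documented above, these three opcodes nest like a structured
     * if/else block:
     *
     *    IF src0.x      ; falls through when src0.x != 0.0
     *       ...         ; "then" block
     *    ELSE           ; end of "then" block: jump to matching ENDIF
     *       ...         ; "else" block
     *    ENDIF          ; no effect, serves only as the jump target
     */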

    /** branch instruction: begin a loop block */
    RC_OPCODE_BGNLOOP,

    /** branch instruction: break out of the innermost loop */
    RC_OPCODE_BRK,

    /** branch instruction: end a loop block, jump back to the matching RC_OPCODE_BGNLOOP */
    RC_OPCODE_ENDLOOP,

    /** branch instruction: continue with the next iteration of the innermost loop */
    RC_OPCODE_CONT,

    /** special instruction, used in R300-R500 fragment program pair instructions
     * indicates that the result of the alpha operation shall be replicated
     * across all other channels */
    RC_OPCODE_REPL_ALPHA,

    /** special instruction, used in R300-R500 fragment programs
     * to indicate the start of a block of texture instructions that
     * can run simultaneously. */
    RC_OPCODE_BEGIN_TEX,

    /** Stop execution of the shader (GLSL discard) */
    RC_OPCODE_KILP,

    /* Vertex shader CF Instructions */
    RC_ME_PRED_SEQ,
    RC_ME_PRED_SGT,
    RC_ME_PRED_SGE,
    RC_ME_PRED_SNEQ,
    RC_ME_PRED_SET_CLR,
    RC_ME_PRED_SET_INV,
    RC_ME_PRED_SET_POP,
    RC_ME_PRED_SET_RESTORE,

    RC_VE_PRED_SEQ_PUSH,
    RC_VE_PRED_SGT_PUSH,
    RC_VE_PRED_SGE_PUSH,
    RC_VE_PRED_SNEQ_PUSH,

    MAX_RC_OPCODE
} rc_opcode;


struct rc_opcode_info {
    rc_opcode Opcode;
    const char * Name;

    /** true if the instruction reads from a texture.
     *
     * \note This is false for the KIL instruction, even though KIL is
     * a texture instruction from a hardware point of view. */
    unsigned int HasTexture:1;

    unsigned int NumSrcRegs:2;
    unsigned int HasDstReg:1;

    /** true if this instruction affects control flow */
    unsigned int IsFlowControl:1;

    /** true if this is a vector instruction that operates on components in parallel
     * without any cross-component interaction */
    unsigned int IsComponentwise:1;

    /** true if this instruction sources only its operands' X components
     * to compute a single result which is smeared across all output channels */
    unsigned int IsStandardScalar:1;
};
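
/*
 * For illustration only: the table declared below is expected to hold one
 * entry per opcode. A hypothetical entry (consistent with the ADD opcode's
 * documentation above, but not copied from the actual definition, which
 * presumably lives in a companion .c file) would look like:
 *
 *    { .Opcode = RC_OPCODE_ADD, .Name = "ADD", .NumSrcRegs = 2,
 *      .HasDstReg = 1, .IsComponentwise = 1 },
 */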

extern const struct rc_opcode_info rc_opcodes[MAX_RC_OPCODE];

static inline const struct rc_opcode_info * rc_get_opcode_info(rc_opcode opcode)
{
    assert((unsigned int)opcode < MAX_RC_OPCODE);
    assert(rc_opcodes[opcode].Opcode == opcode);

    return &rc_opcodes[opcode];
}
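
/*
 * Usage sketch (illustrative only): a compiler pass can query the
 * per-opcode properties before rewriting an instruction, e.g.
 *
 *    const struct rc_opcode_info * info = rc_get_opcode_info(RC_OPCODE_MAD);
 *    if (info->IsComponentwise) {
 *       // components can be rewritten independently of each other
 *    }
 */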

struct rc_instruction;

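/*
 * Given the set of components written by inst (writemask), fill srcmasks[i]
 * with the components read from source register i. A sketch of the intended
 * behaviour, inferred from the opcode descriptions above rather than from
 * the implementation: for a componentwise opcode such as RC_OPCODE_ADD,
 * writing only dst.xy reads only src0.xy and src1.xy, whereas RC_OPCODE_DP3
 * reads src0.xyz and src1.xyz regardless of which output components are
 * written.
 */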
void rc_compute_sources_for_writemask(
    const struct rc_instruction *inst,
    unsigned int writemask,
    unsigned int *srcmasks);

#endif /* RADEON_OPCODES_H */