/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __PAN_IR_H
#define __PAN_IR_H

#include <stdint.h>
#include "compiler/nir/nir.h"
#include "util/hash_table.h"
#include "util/u_dynarray.h"
/* Indices for named (non-XFB) varyings that are present. These are packed
 * tightly so they correspond to a bitfield of present varyings (P) indexed by
 * (1 << PAN_VARY_*). This has the nice property that you can look up the
 * buffer index of a given special field with shift S by:
 *
 *      idx = popcount(P & ((1 << S) - 1))
 *
 * That is, count all of the present varyings that come earlier; that count is
 * the index of this one. Likewise, the total number of special buffers
 * required is simply popcount(P).
 */

enum pan_special_varying {
   PAN_VARY_GENERAL = 0,
   PAN_VARY_POSITION = 1,
   PAN_VARY_PSIZ = 2,
   PAN_VARY_PNTCOORD = 3,
   PAN_VARY_FACE = 4,
   PAN_VARY_FRAGCOORD = 5,

   /* Keep last */
   PAN_VARY_MAX,
};
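
/*
 * For illustration, a minimal sketch of the lookup described above. The
 * helper name is hypothetical and not part of this header's API; popcount
 * stands in for a population count such as util_bitcount:
 *
 *    unsigned
 *    special_varying_buf_index(uint32_t present, enum pan_special_varying v)
 *    {
 *       assert(present & (1 << v));
 *       return popcount(present & ((1 << v) - 1));
 *    }
 */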

/* Maximum number of attribute descriptors required for varyings. These include
 * up to MAX_VARYING source-level varyings plus a descriptor for each
 * non-GENERAL special varying */
#define PAN_MAX_VARYINGS (MAX_VARYING + PAN_VARY_MAX - 1)

/* Special attribute slots for vertex builtins. Sort of arbitrary, but let's be
 * consistent with the blob so we can compare traces more easily. */

enum { PAN_VERTEX_ID = 16, PAN_INSTANCE_ID = 17, PAN_MAX_ATTRIBUTE };

/* Architecturally, Bifrost/Valhall can address 128 FAU slots of 64 bits each.
 * In practice, the maximum number of FAU slots is limited by the
 * implementation. All known Bifrost and Valhall devices limit it to 64 FAU
 * slots. Therefore the maximum number of 32-bit words is 128, since there are
 * 2 words per FAU slot.
 *
 * Midgard can push at most 92 words, so this bound suffices. The Midgard
 * compiler pushes less than this, as Midgard uses register-mapped uniforms
 * instead of FAU, preventing large numbers of uniforms from being pushed for
 * nontrivial programs.
 */
#define PAN_MAX_PUSH 128

/* Architectural invariants (Midgard and Bifrost): a UBO must be <= 2^16 bytes,
 * so an offset to a word must be < 2^16. There are fewer than 2^8 UBOs */

struct panfrost_ubo_word {
   uint16_t ubo;
   uint16_t offset;
};

struct panfrost_ubo_push {
   unsigned count;
   struct panfrost_ubo_word words[PAN_MAX_PUSH];
};

/* Helper for searching the above. Note this is O(N) in the number of pushed
 * constants; do not run it in the draw-call hot path */

unsigned pan_lookup_pushed_ubo(struct panfrost_ubo_push *push, unsigned ubo,
                               unsigned offs);
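
/*
 * For illustration, a minimal sketch of the linear search implied above. This
 * is not the actual implementation (which lives in the compiler sources), just
 * the shape of it:
 *
 *    unsigned
 *    lookup_pushed_ubo_sketch(struct panfrost_ubo_push *push, unsigned ubo,
 *                             unsigned offs)
 *    {
 *       for (unsigned i = 0; i < push->count; ++i) {
 *          if (push->words[i].ubo == ubo && push->words[i].offset == offs)
 *             return i;
 *       }
 *
 *       unreachable("UBO word not pushed");
 *    }
 */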

struct panfrost_compile_inputs {
   struct util_debug_callback *debug;

   unsigned gpu_id;
   bool is_blend, is_blit;
   struct {
      unsigned nr_samples;
      uint64_t bifrost_blend_desc;
   } blend;
   bool no_idvs;
   bool no_ubo_to_push;

   /* Used on Valhall.
    *
    * Bit mask of special desktop-only varyings (e.g. VARYING_SLOT_TEX0)
    * written by the previous stage (when compiling a fragment shader) or
    * written by this stage (when compiling a vertex shader). Bits are slots
    * from gl_varying_slot.
    *
    * For modern APIs (GLES or VK), this should be 0.
    */
   uint32_t fixed_varying_mask;

   union {
      struct {
         uint32_t rt_conv[8];
      } bifrost;
   };
};

struct pan_shader_varying {
   gl_varying_slot location;
   enum pipe_format format;
};

struct bifrost_shader_blend_info {
   nir_alu_type type;
   uint32_t return_offset;

   /* mali_bifrost_register_file_format corresponding to nir_alu_type */
   unsigned format;
};

/*
 * Unpacked form of a v7 message preload descriptor, produced by the compiler's
 * message preload optimization. By splitting out this struct, the compiler
 * does not need to know about data structure packing, avoiding a dependency on
 * GenXML.
 */
struct bifrost_message_preload {
   /* Whether to preload this message */
   bool enabled;

   /* Varying to load from */
   unsigned varying_index;

   /* Register format: FP16 if set, FP32 otherwise */
   bool fp16;

   /* Number of components, ignored if texturing */
   unsigned num_components;

   /* If texture is set, performs a texture instruction according to
    * texture_index, skip, and zero_lod. If texture is unset, only the
    * varying load is performed.
    */
   bool texture, skip, zero_lod;
   unsigned texture_index;
};
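
/*
 * For illustration only, a hypothetical preload that fetches a varying into
 * FP16 registers and then textures from texture 0 with LOD forced to zero
 * might be described as:
 *
 *    struct bifrost_message_preload msg = {
 *       .enabled = true,
 *       .varying_index = 0,
 *       .fp16 = true,
 *       .texture = true,
 *       .zero_lod = true,
 *       .texture_index = 0,
 *    };
 */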

struct bifrost_shader_info {
   struct bifrost_shader_blend_info blend[8];
   nir_alu_type blend_src1_type;
   bool wait_6, wait_7;
   struct bifrost_message_preload messages[2];

   /* Whether any flat varyings are loaded. This may disable optimizations
    * that change the provoking vertex, since that would load incorrect
    * values for flat varyings.
    */
   bool uses_flat_shading;
};

struct midgard_shader_info {
   unsigned first_tag;
};

struct pan_shader_info {
   gl_shader_stage stage;
   unsigned work_reg_count;
   unsigned tls_size;
   unsigned wls_size;

   /* Bit mask of preloaded registers */
   uint64_t preload;

   union {
      struct {
         bool reads_frag_coord;
         bool reads_point_coord;
         bool reads_face;
         bool can_discard;
         bool writes_depth;
         bool writes_stencil;
         bool writes_coverage;
         bool sidefx;
         bool sample_shading;
         bool early_fragment_tests;
         bool can_early_z, can_fpk;
         bool untyped_color_outputs;
         BITSET_WORD outputs_read;
         BITSET_WORD outputs_written;
      } fs;

      struct {
         bool writes_point_size;

         /* If the primary shader writes point size, the Valhall
          * driver may need a variant that does not write point
          * size. Offset to such a shader in the program binary.
          *
          * Zero if no such variant is required.
          *
          * Only used with IDVS on Valhall.
          */
         unsigned no_psiz_offset;

         /* Set if Index-Driven Vertex Shading is in use */
         bool idvs;

         /* If IDVS is used, whether a varying shader is used */
         bool secondary_enable;

         /* If a varying shader is used, the varying shader's
          * offset in the program binary
          */
         unsigned secondary_offset;

         /* If IDVS is in use, number of work registers used by
          * the varying shader
          */
         unsigned secondary_work_reg_count;

         /* If IDVS is in use, bit mask of preloaded registers
          * used by the varying shader
          */
         uint64_t secondary_preload;
      } vs;

      struct {
         /* Is it legal to merge workgroups? This is true if the
          * shader uses neither barriers nor shared memory. This
          * requires caution: if the API allows specifying shared
          * memory at launch time (instead of compile time), that
          * memory will not be accounted for by the compiler.
          *
          * Used by the Valhall hardware.
          */
         bool allow_merging_workgroups;
      } cs;
   };

   /* Does the shader contain a barrier? Or (for fragment shaders) does it
    * require helper invocations, which demand the same ordering guarantees
    * from the hardware? These notions are unified in the hardware, so we
    * unify them here as well.
    */
   bool contains_barrier;
   bool separable;
   bool writes_global;
   uint64_t outputs_written;

   /* Floating point controls that the driver should try to honour */
   bool ftz_fp16, ftz_fp32;

   unsigned sampler_count;
   unsigned texture_count;
   unsigned ubo_count;
   unsigned attributes_read_count;
   unsigned attribute_count;
   unsigned attributes_read;

   struct {
      unsigned input_count;
      struct pan_shader_varying input[PAN_MAX_VARYINGS];
      unsigned output_count;
      struct pan_shader_varying output[PAN_MAX_VARYINGS];
   } varyings;

   /* UBOs to push to Register Mapped Uniforms (Midgard) or Fast Access
    * Uniforms (Bifrost) */
   struct panfrost_ubo_push push;

   uint32_t ubo_mask;

   /* Quirk for GPUs that do not support auto32 types. */
   bool quirk_no_auto32;

   union {
      struct bifrost_shader_info bifrost;
      struct midgard_shader_info midgard;
   };
};

typedef struct pan_block {
   /* Link to next block. Must be first for mir_get_block */
   struct list_head link;

   /* List of instructions emitted for the current block */
   struct list_head instructions;

   /* Index of the block in source order */
   unsigned name;

   /* Control flow graph */
   struct pan_block *successors[2];
   struct set *predecessors;
   bool unconditional_jumps;

   /* In liveness analysis, these are per-component live masks for the
    * block's indices. Scalar compilers have the luxury of using simple bit
    * fields, but for us, liveness is a vector idea. */
   uint16_t *live_in;
   uint16_t *live_out;
} pan_block;

struct pan_instruction {
   struct list_head link;
};

#define pan_foreach_instr_in_block_rev(block, v)                              \
   list_for_each_entry_rev(struct pan_instruction, v, &block->instructions,   \
                           link)

#define pan_foreach_successor(blk, v)                                         \
   pan_block *v;                                                              \
   pan_block **_v;                                                            \
   for (_v = (pan_block **)&blk->successors[0], v = *_v;                      \
        v != NULL && _v < (pan_block **)&blk->successors[2]; _v++, v = *_v)

#define pan_foreach_predecessor(blk, v)                                       \
   struct set_entry *_entry_##v;                                              \
   struct pan_block *v;                                                       \
   for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL),           \
        v = (struct pan_block *)(_entry_##v ? _entry_##v->key : NULL);        \
        _entry_##v != NULL;                                                   \
        _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v),     \
        v = (struct pan_block *)(_entry_##v ? _entry_##v->key : NULL))
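
/*
 * For illustration, a minimal sketch of walking the CFG with the iterators
 * above. The function below is hypothetical, not part of this API; note the
 * macros declare the iteration variable themselves:
 *
 *    static unsigned
 *    count_cfg_edges(pan_block *blk)
 *    {
 *       unsigned edges = 0;
 *
 *       pan_foreach_successor(blk, succ)
 *          edges++;
 *
 *       pan_foreach_predecessor(blk, pred)
 *          edges++;
 *
 *       return edges;
 *    }
 */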

static inline pan_block *
pan_exit_block(struct list_head *blocks)
{
   pan_block *last = list_last_entry(blocks, pan_block, link);
   assert(!last->successors[0] && !last->successors[1]);
   return last;
}

typedef void (*pan_liveness_update)(uint16_t *, void *, unsigned max);

void pan_liveness_gen(uint16_t *live, unsigned node, unsigned max,
                      uint16_t mask);
void pan_liveness_kill(uint16_t *live, unsigned node, unsigned max,
                       uint16_t mask);
bool pan_liveness_get(uint16_t *live, unsigned node, uint16_t max);

void pan_compute_liveness(struct list_head *blocks, unsigned temp_count,
                          pan_liveness_update callback);

void pan_free_liveness(struct list_head *blocks);
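
/*
 * For illustration, a minimal sketch of how a backend might drive the
 * liveness pass. The callback, instruction type, and context below are
 * hypothetical; a real backend walks its own IR, killing destinations and
 * generating sources:
 *
 *    static void
 *    my_liveness_update(uint16_t *live, void *ins, unsigned max)
 *    {
 *       my_instruction *I = ins;
 *       pan_liveness_kill(live, I->dest, max, I->dest_mask);
 *       pan_liveness_gen(live, I->src, max, I->src_mask);
 *    }
 *
 *    pan_compute_liveness(&ctx->blocks, ctx->temp_count, my_liveness_update);
 *    ... consult block->live_in / block->live_out ...
 *    pan_free_liveness(&ctx->blocks);
 */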

uint16_t pan_to_bytemask(unsigned bytes, unsigned mask);

void pan_block_add_successor(pan_block *block, pan_block *successor);

/* IR indexing */
#define PAN_IS_REG (1)

/* IR printing helpers */
void pan_print_alu_type(nir_alu_type t, FILE *fp);

/* NIR passes to do some backend-specific lowering */

#define PAN_WRITEOUT_C 1
#define PAN_WRITEOUT_Z 2
#define PAN_WRITEOUT_S 4
#define PAN_WRITEOUT_2 8

bool pan_nir_lower_zs_store(nir_shader *nir);
bool pan_nir_lower_store_component(nir_shader *shader);

bool pan_nir_lower_image_ms(nir_shader *shader);

bool pan_lower_helper_invocation(nir_shader *shader);
bool pan_lower_sample_pos(nir_shader *shader);
bool pan_lower_xfb(nir_shader *nir);

bool pan_lower_image_index(nir_shader *shader, unsigned vs_img_attrib_offset);

void pan_nir_collect_varyings(nir_shader *s, struct pan_shader_info *info);

/*
 * Helper returning the subgroup size. Generally, this is equal to the number
 * of threads in a warp. For Midgard (including warping models), this returns
 * 1, as subgroups are not supported.
 */
static inline unsigned
pan_subgroup_size(unsigned arch)
{
   if (arch >= 9)
      return 16;
   else if (arch >= 7)
      return 8;
   else if (arch >= 6)
      return 4;
   else
      return 1;
}

/*
 * Helper extracting the table index from a resource handle in the Valhall
 * descriptor model.
 */
static inline unsigned
pan_res_handle_get_table(unsigned handle)
{
   unsigned table = handle >> 24;

   assert(table < 64);
   return table;
}

/*
 * Helper extracting the index from a resource handle in the Valhall descriptor
 * model.
 */
static inline unsigned
pan_res_handle_get_index(unsigned handle)
{
   return handle & BITFIELD_MASK(24);
}

/*
 * Helper creating a resource handle for the Valhall descriptor model.
 */
static inline unsigned
pan_res_handle(unsigned table, unsigned index)
{
   assert(table < 64);
   assert(index < (1u << 24));

   return (table << 24) | index;
}
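
/*
 * For illustration: a handle packs the table at bit 24 and the index in the
 * low 24 bits, so construction and extraction round-trip:
 *
 *    unsigned h = pan_res_handle(2, 100);
 *    assert(pan_res_handle_get_table(h) == 2);
 *    assert(pan_res_handle_get_index(h) == 100);
 */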

#endif