/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef V3D_CONTEXT_H
#define V3D_CONTEXT_H

#ifdef V3D_VERSION
#include "broadcom/common/v3d_macros.h"
#endif

#include <stdio.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/bitset.h"
#include "util/slab.h"
#include "util/u_dynarray.h"
#include "xf86drm.h"
#include "drm-uapi/v3d_drm.h"
#include "v3d_screen.h"
#include "broadcom/common/v3d_limits.h"

#include "broadcom/simulator/v3d_simulator.h"
#include "broadcom/compiler/v3d_compiler.h"

struct v3d_job;
struct v3d_bo;
void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);

#include "v3d_bufmgr.h"
#include "v3d_resource.h"
#include "v3d_cl.h"

#define V3D_DIRTY_BLEND (1ull << 0)
#define V3D_DIRTY_RASTERIZER (1ull << 1)
#define V3D_DIRTY_ZSA (1ull << 2)
#define V3D_DIRTY_COMPTEX (1ull << 3)
#define V3D_DIRTY_VERTTEX (1ull << 4)
#define V3D_DIRTY_GEOMTEX (1ull << 5)
#define V3D_DIRTY_FRAGTEX (1ull << 6)

#define V3D_DIRTY_SHADER_IMAGE (1ull << 9)
#define V3D_DIRTY_BLEND_COLOR (1ull << 10)
#define V3D_DIRTY_STENCIL_REF (1ull << 11)
#define V3D_DIRTY_SAMPLE_STATE (1ull << 12)
#define V3D_DIRTY_FRAMEBUFFER (1ull << 13)
#define V3D_DIRTY_STIPPLE (1ull << 14)
#define V3D_DIRTY_VIEWPORT (1ull << 15)
#define V3D_DIRTY_CONSTBUF (1ull << 16)
#define V3D_DIRTY_VTXSTATE (1ull << 17)
#define V3D_DIRTY_VTXBUF (1ull << 18)
#define V3D_DIRTY_SCISSOR (1ull << 19)
#define V3D_DIRTY_FLAT_SHADE_FLAGS (1ull << 20)
#define V3D_DIRTY_PRIM_MODE (1ull << 21)
#define V3D_DIRTY_CLIP (1ull << 22)
#define V3D_DIRTY_UNCOMPILED_CS (1ull << 23)
#define V3D_DIRTY_UNCOMPILED_VS (1ull << 24)
#define V3D_DIRTY_UNCOMPILED_GS (1ull << 25)
#define V3D_DIRTY_UNCOMPILED_FS (1ull << 26)

#define V3D_DIRTY_COMPILED_CS (1ull << 29)
#define V3D_DIRTY_COMPILED_VS (1ull << 30)
#define V3D_DIRTY_COMPILED_GS_BIN (1ull << 31)
#define V3D_DIRTY_COMPILED_GS (1ull << 32)
#define V3D_DIRTY_COMPILED_FS (1ull << 33)

#define V3D_DIRTY_FS_INPUTS (1ull << 38)
#define V3D_DIRTY_GS_INPUTS (1ull << 39)
#define V3D_DIRTY_STREAMOUT (1ull << 40)
#define V3D_DIRTY_OQ (1ull << 41)
#define V3D_DIRTY_CENTROID_FLAGS (1ull << 42)
#define V3D_DIRTY_NOPERSPECTIVE_FLAGS (1ull << 43)
#define V3D_DIRTY_SSBO (1ull << 44)

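/* Illustrative sketch (not a specific call site in this header): state-bind
 * hooks OR the relevant V3D_DIRTY_* bit into v3d->dirty, and draw-time
 * emission only re-emits state whose bit is set, e.g.:
 *
 *    v3d->dirty |= V3D_DIRTY_BLEND;        // in a bind_blend_state hook
 *    ...
 *    if (v3d->dirty & V3D_DIRTY_BLEND)
 *            emit_blend_state(v3d);        // hypothetical emit helper
 */
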
#define V3D_MAX_FS_INPUTS 64

#define MAX_JOB_SCISSORS 16

enum v3d_sampler_state_variant {
        V3D_SAMPLER_STATE_BORDER_0000,
        V3D_SAMPLER_STATE_BORDER_0001,
        V3D_SAMPLER_STATE_BORDER_1111,
        V3D_SAMPLER_STATE_F16,
        V3D_SAMPLER_STATE_F16_UNORM,
        V3D_SAMPLER_STATE_F16_SNORM,
        V3D_SAMPLER_STATE_F16_BGRA,
        V3D_SAMPLER_STATE_F16_BGRA_UNORM,
        V3D_SAMPLER_STATE_F16_BGRA_SNORM,
        V3D_SAMPLER_STATE_F16_A,
        V3D_SAMPLER_STATE_F16_A_SNORM,
        V3D_SAMPLER_STATE_F16_A_UNORM,
        V3D_SAMPLER_STATE_F16_LA,
        V3D_SAMPLER_STATE_F16_LA_UNORM,
        V3D_SAMPLER_STATE_F16_LA_SNORM,
        V3D_SAMPLER_STATE_32,
        V3D_SAMPLER_STATE_32_UNORM,
        V3D_SAMPLER_STATE_32_SNORM,
        V3D_SAMPLER_STATE_32_A,
        V3D_SAMPLER_STATE_32_A_UNORM,
        V3D_SAMPLER_STATE_32_A_SNORM,
        V3D_SAMPLER_STATE_1010102U,
        V3D_SAMPLER_STATE_16U,
        V3D_SAMPLER_STATE_16I,
        V3D_SAMPLER_STATE_8I,
        V3D_SAMPLER_STATE_8U,

        V3D_SAMPLER_STATE_VARIANT_COUNT,
};

enum v3d_flush_cond {
        /* Flush job unless we are flushing for transform feedback, where we
         * handle flushing in the driver via the 'Wait for TF' packet.
         */
        V3D_FLUSH_DEFAULT,
        /* Always flush the job, even for cases where we would normally not
         * do it, such as transform feedback.
         */
        V3D_FLUSH_ALWAYS,
        /* Flush job if it is not the current FBO job. This is intended to
         * skip automatic flushes of the current job for resources that we
         * expect to be externally synchronized by the application using
         * glMemoryBarrier(), such as SSBOs and shader images.
         */
        V3D_FLUSH_NOT_CURRENT_JOB,
};

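/* Illustrative sketch (not a specific call site): a flush condition is
 * passed down to the job-flushing helpers declared later in this header.
 * For example, to flush jobs writing a resource while skipping the current
 * FBO job for SSBO/image-style resources that the application synchronizes
 * with glMemoryBarrier():
 *
 *    v3d_flush_jobs_writing_resource(v3d, prsc,
 *                                    V3D_FLUSH_NOT_CURRENT_JOB, false);
 */
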
/* bitmask */
enum v3d_blitter_op {
        V3D_SAVE_TEXTURES = (1u << 1),
        V3D_SAVE_FRAMEBUFFER = (1u << 2),
        V3D_DISABLE_RENDER_COND = (1u << 3),

        V3D_BLIT = V3D_SAVE_FRAMEBUFFER | V3D_SAVE_TEXTURES,
        V3D_BLIT_COND = V3D_BLIT | V3D_DISABLE_RENDER_COND,
        V3D_CLEAR = 0,
        V3D_CLEAR_COND = V3D_CLEAR | V3D_DISABLE_RENDER_COND,
        V3D_CLEAR_SURFACE = V3D_SAVE_FRAMEBUFFER,
        V3D_CLEAR_SURFACE_COND = V3D_CLEAR_SURFACE | V3D_DISABLE_RENDER_COND
};

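/* Illustrative sketch (not a specific call site): the composite values above
 * are passed to v3d_blitter_save(), declared later in this header, so the
 * right pieces of context state get saved before handing off to the shared
 * gallium u_blitter, e.g.:
 *
 *    v3d_blitter_save(v3d, V3D_BLIT_COND);
 *    util_blitter_blit(v3d->blitter, ...);  // then restore/clean up as usual
 */
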
struct v3d_sampler_view {
        struct pipe_sampler_view base;
        uint32_t p0;
        uint32_t p1;
        /* Precomputed swizzles to pass in to the shader key. */
        uint8_t swizzle[4];

        uint8_t texture_shader_state[32];
        /* V3D 4.x: Texture state struct. */
        struct v3d_bo *bo;

        enum v3d_sampler_state_variant sampler_variant;

        /* Actual texture to be read by this sampler view. May be different
         * from base.texture in the case of having a shadow tiled copy of a
         * raster texture.
         */
        struct pipe_resource *texture;

        /* A serial ID used to identify cases where a new BO has been created
         * and we need to rebind a sampler view that was created against the
         * previous BO to point to the new one.
         */
        uint32_t serial_id;
};

struct v3d_sampler_state {
        struct pipe_sampler_state base;
        uint32_t p0;
        uint32_t p1;

        /* V3D 3.x: Packed texture state. */
        uint8_t texture_shader_state[32];
        /* V3D 4.x: Sampler state struct. */
        struct pipe_resource *sampler_state;
        uint32_t sampler_state_offset[V3D_SAMPLER_STATE_VARIANT_COUNT];

        bool border_color_variants;
};

struct v3d_texture_stateobj {
        struct pipe_sampler_view *textures[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_textures;
        struct pipe_sampler_state *samplers[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_samplers;
        struct v3d_cl_reloc texture_state[V3D_MAX_TEXTURE_SAMPLERS];
};

struct v3d_shader_uniform_info {
        enum quniform_contents *contents;
        uint32_t *data;
        uint32_t count;
};

struct v3d_uncompiled_shader {
        /** A name for this program, so you can track it in shader-db output. */
        uint32_t program_id;
        /** How many variants of this program were compiled, for shader-db. */
        uint32_t compiled_variant_count;
        struct pipe_shader_state base;
        uint32_t num_tf_outputs;
        struct v3d_varying_slot *tf_outputs;
        uint16_t tf_specs[16];
        uint16_t tf_specs_psiz[16];
        uint32_t num_tf_specs;

        /* For caching */
        unsigned char sha1[20];
};

struct v3d_compiled_shader {
        struct pipe_resource *resource;
        uint32_t offset;

        union {
                struct v3d_prog_data *base;
                struct v3d_vs_prog_data *vs;
                struct v3d_gs_prog_data *gs;
                struct v3d_fs_prog_data *fs;
                struct v3d_compute_prog_data *compute;
        } prog_data;

        /**
         * V3D_DIRTY_* flags that, when set in v3d->dirty, mean that the
         * uniforms have to be rewritten (and therefore the shader state
         * reemitted).
         */
        uint64_t uniform_dirty_bits;
};

struct v3d_program_stateobj {
        struct v3d_uncompiled_shader *bind_vs, *bind_gs, *bind_fs, *bind_compute;
        struct v3d_compiled_shader *cs, *vs, *gs_bin, *gs, *fs, *compute;

        struct hash_table *cache[MESA_SHADER_STAGES];

        struct v3d_bo *spill_bo;
        int spill_size_per_thread;
};

struct v3d_constbuf_stateobj {
        struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
        BITSET_DECLARE(enabled_mask, PIPE_MAX_CONSTANT_BUFFERS);
        BITSET_DECLARE(dirty_mask, PIPE_MAX_CONSTANT_BUFFERS);
};

struct v3d_vertexbuf_stateobj {
        struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
        unsigned count;
        BITSET_DECLARE(enabled_mask, PIPE_MAX_ATTRIBS);
        BITSET_DECLARE(dirty_mask, PIPE_MAX_ATTRIBS);
};

struct v3d_vertex_stateobj {
        struct pipe_vertex_element pipe[V3D_MAX_VS_INPUTS / 4];
        unsigned num_elements;

        uint8_t attrs[16 * (V3D_MAX_VS_INPUTS / 4)];
        /* defaults can be NULL for some hw generations */
        struct pipe_resource *defaults;
        uint32_t defaults_offset;
};

struct v3d_stream_output_target {
        struct pipe_stream_output_target base;
        /* Number of transform feedback vertices written to this target */
        uint32_t recorded_vertex_count;
        /* Number of vertices we've written into the buffer so far */
        uint32_t offset;
};

struct v3d_streamout_stateobj {
        struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
        unsigned num_targets;
};

struct v3d_ssbo_stateobj {
        struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
        BITSET_DECLARE(enabled_mask, PIPE_MAX_SHADER_BUFFERS);
};

/* Hash table key for v3d->jobs */
struct v3d_job_key {
        struct pipe_surface *cbufs[V3D_MAX_DRAW_BUFFERS];
        struct pipe_surface *zsbuf;
        struct pipe_surface *bbuf;
};

enum v3d_ez_state {
        V3D_EZ_UNDECIDED = 0,
        V3D_EZ_GT_GE,
        V3D_EZ_LT_LE,
        V3D_EZ_DISABLED,
};

struct v3d_image_view {
        struct pipe_image_view base;
        /* V3D 4.x texture shader state struct */
        struct pipe_resource *tex_state;
        uint32_t tex_state_offset;
};

struct v3d_shaderimg_stateobj {
        struct v3d_image_view si[PIPE_MAX_SHADER_IMAGES];
        BITSET_DECLARE(enabled_mask, PIPE_MAX_SHADER_IMAGES);
};

struct v3d_perfmon_state {
        /* The kernel perfmon id */
        uint32_t kperfmon_id;
        /* True if at least one job was submitted with this perfmon. */
        bool job_submitted;
        /* Fence to be signaled when the last job submitted with this perfmon
         * is executed by the GPU.
         */
        struct v3d_fence *last_job_fence;
        uint8_t counters[DRM_V3D_MAX_PERF_COUNTERS];
        uint64_t values[DRM_V3D_MAX_PERF_COUNTERS];
};

/**
 * A complete bin/render job.
 *
 * This is all of the state necessary to submit a bin/render to the kernel.
 * We want to be able to have multiple in progress at a time, so that we don't
 * need to flush an existing CL just to switch to rendering to a new render
 * target (which would mean reading back from the old render target when
 * starting to render to it again).
 */
struct v3d_job {
        struct v3d_context *v3d;
        struct v3d_cl bcl;
        struct v3d_cl rcl;
        struct v3d_cl indirect;
        struct v3d_bo *tile_alloc;
        struct v3d_bo *tile_state;

        struct drm_v3d_submit_cl submit;

        /**
         * Set of all BOs referenced by the job. This will be used for making
         * the list of BOs that the kernel will need to have paged in to
         * execute our job.
         */
        struct set *bos;

        /** Sum of the sizes of the BOs referenced by the job. */
        uint32_t referenced_size;

        struct set *write_prscs;
        struct set *tf_write_prscs;

        /* Size of the submit.bo_handles array. */
        uint32_t bo_handles_size;

        /** @{
         * Surfaces to submit rendering for.
         * For blit operations, bbuf is the source surface, and cbufs[0] is
         * the destination surface.
         * For blit operations straight from the job's tile buffer, dbuf is the
         * blit destination surface.
         */
        uint32_t nr_cbufs;
        struct pipe_surface *cbufs[V3D_MAX_DRAW_BUFFERS];
        struct pipe_surface *zsbuf;
        struct pipe_surface *bbuf;
        struct pipe_surface *dbuf;
        /** @} */
        /** @{
         * Bounding box of the scissor across all queued drawing.
         *
         * Note that the max values are exclusive.
         */
        uint32_t draw_min_x;
        uint32_t draw_min_y;
        uint32_t draw_max_x;
        uint32_t draw_max_y;

        /** @} */
        /** @{
         * List of scissor rects used for all queued drawing. All scissor
         * rects will be contained in the draw_{min/max}_{x/y} bounding box.
         *
         * This is used as an optimization when all drawing is scissored to
         * limit tile flushing only to tiles that intersect a scissor rect.
         * If scissor is used together with non-scissored drawing, then
         * the optimization is disabled.
         */
        struct {
                bool disabled;
                uint32_t count;
                struct {
                        uint32_t min_x, min_y;
                        uint32_t max_x, max_y;
                } rects[MAX_JOB_SCISSORS];
        } scissor;

        /** @} */
        /** @{
         * Width/height of the color framebuffer being rendered to,
         * for V3D_TILE_RENDERING_MODE_CONFIG.
         */
        uint32_t draw_width;
        uint32_t draw_height;
        uint32_t num_layers;

        /** @} */
        /** @{ Tile information, depending on MSAA and float color buffer. */
        uint32_t draw_tiles_x; /** @< Number of tiles wide for framebuffer. */
        uint32_t draw_tiles_y; /** @< Number of tiles high for framebuffer. */

        uint32_t tile_width; /** @< Width of a tile. */
        uint32_t tile_height; /** @< Height of a tile. */
        /** maximum internal_bpp of all color render targets. */
        uint32_t internal_bpp;

        /** Whether the current rendering is in a 4X MSAA tile buffer. */
        bool msaa;
        /** @} */

        /* Bitmask of PIPE_CLEAR_* of buffers that were cleared before the
         * first rendering.
         */
        uint32_t clear_tlb;
        /* Bitmask of PIPE_CLEAR_* of buffers that were cleared using a draw
         * call (not necessarily before the first rendering) instead of a TLB
         * clear.
         */
        uint32_t clear_draw;
        /* Bitmask of PIPE_CLEAR_* of attached buffers that were invalidated
         * by glInvalidateFramebuffer so we can avoid loading them.
         */
        uint32_t invalidated_load;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been read by a draw
         * call without having been cleared first.
         */
        uint32_t load;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been rendered to
         * (either clears or draws) and should be stored.
         */
        uint32_t store;
        /* Bitmask of PIPE_CLEAR_* of buffers that need to be blitted into
         * a destination buffer other than the job's RT. Used to implement
         * blits from jobs that have not yet been flushed, including MSAA
         * resolve.
         */
        uint32_t blit_tlb;

        uint32_t clear_color[V3D_MAX_DRAW_BUFFERS][4];
        float clear_z;
        uint8_t clear_s;

        /* If TLB double-buffering is enabled for this job */
        bool double_buffer;

        /**
         * Set if some drawing (triangles, blits, or just a glClear()) has
         * been done to the FBO, meaning that we need to
         * DRM_IOCTL_V3D_SUBMIT_CL.
         */
        bool needs_flush;

        /* Set if any shader has dirtied cachelines in the TMU that need to be
         * flushed before job end.
         */
        bool tmu_dirty_rcl;

        /**
         * Set if a packet enabling TF has been emitted in the job (V3D 4.x).
         */
        bool tf_enabled;

        bool needs_primitives_generated;

        /**
         * Current EZ state for drawing. Updated at the start of draw after
         * we've decided on the shader being rendered.
         */
        enum v3d_ez_state ez_state;
        /**
         * The first EZ state that was used for drawing with a decided EZ
         * direction (so either UNDECIDED, GT, or LT).
         */
        enum v3d_ez_state first_ez_state;

        /**
         * If we have already decided if we need to disable early Z/S
         * completely for this job.
         */
        bool decided_global_ez_enable;

        /**
         * When we decide if we need to disable early Z/S globally, track the
         * Z-state we used to make that decision so we can change the decision
         * if the state changes.
         */
        struct v3d_depth_stencil_alpha_state *global_ez_zsa_decision_state;

        /**
         * If this job has been configured to use early Z/S clear.
         */
        bool early_zs_clear;

        /**
         * Number of draw calls (not counting full buffer clears) queued in
         * the current job.
         */
        uint32_t draw_calls_queued;

        /**
         * Number of draw calls (not counting full buffer clears) queued in
         * the current job during active transform feedback.
         */
        uint32_t tf_draw_calls_queued;

        struct v3d_job_key key;
};

struct v3d_context {
        struct pipe_context base;

        int fd;
        struct v3d_screen *screen;

        /** The 3D rendering job for the currently bound FBO. */
        struct v3d_job *job;

        /* Map from struct v3d_job_key to the job for that FBO. */
        struct hash_table *jobs;

        /**
         * Map from v3d_resource to a job writing to that resource.
         *
         * Primarily for flushing jobs rendering to textures that are now
         * being read from.
         */
        struct hash_table *write_jobs;

        struct slab_child_pool transfer_pool;
        struct blitter_context *blitter;

        /** bitfield of V3D_DIRTY_* */
        uint64_t dirty;

        uint32_t next_uncompiled_program_id;
        uint64_t next_compiled_program_id;

        struct v3d_compiler_state *compiler_state;

        uint8_t prim_mode;

        /** Maximum index buffer valid for the current shader_rec. */
        uint32_t max_index;

        /** Sync object that our RCL or TFU job will update as its out_sync. */
        uint32_t out_sync;

        /* Stream uploader used by gallium internals. This could also be used
         * by driver internals, but we tend to use the v3d_cl.h interfaces
         * instead.
         */
        struct u_upload_mgr *uploader;
        /* State uploader used inside the driver. This is for packing bits of
         * long-term state inside buffers, since the kernel interfaces
         * allocate a page at a time.
         */
        struct u_upload_mgr *state_uploader;

        struct pipe_shader_state *sand8_blit_vs;
        struct pipe_shader_state *sand8_blit_fs_luma;
        struct pipe_shader_state *sand8_blit_fs_chroma;
        struct pipe_shader_state *sand30_blit_vs;
        struct pipe_shader_state *sand30_blit_fs;

        /** @{ Current pipeline state objects */
        struct pipe_scissor_state scissor;
        struct v3d_blend_state *blend;
        struct v3d_rasterizer_state *rasterizer;
        struct v3d_depth_stencil_alpha_state *zsa;

        struct v3d_program_stateobj prog;
        uint32_t compute_num_workgroups[3];
        uint32_t compute_workgroup_size[3];
        struct v3d_bo *compute_shared_memory;
        uint32_t shared_memory;

        struct v3d_vertex_stateobj *vtx;

        struct {
                struct pipe_blend_color f;
                uint16_t hf[4];
        } blend_color;
        struct pipe_stencil_ref stencil_ref;
        unsigned sample_mask;
        struct pipe_framebuffer_state framebuffer;

        /* Per render target, whether we should swap the R and B fields in the
         * shader's color output and in blending. If render targets disagree
         * on the R/B swap and use the constant color, then we would need to
         * fall back to in-shader blending.
         */
        uint8_t swap_color_rb;

        /* Per render target, whether we should treat the dst alpha values as
         * one in blending.
         *
         * For RGBX formats, the tile buffer's alpha channel will be
         * undefined.
         */
        uint8_t blend_dst_alpha_one;

        bool active_queries;

        /**
         * If a compute job writes a resource read by a non-compute stage we
         * should sync on the last compute job.
         */
        bool sync_on_last_compute_job;

        uint32_t tf_prims_generated;
        uint32_t prims_generated;
        bool prim_restart;

        uint32_t n_primitives_generated_queries_in_flight;

        struct pipe_poly_stipple stipple;
        struct pipe_clip_state clip;
        struct pipe_viewport_state viewport;
        struct v3d_ssbo_stateobj ssbo[PIPE_SHADER_TYPES];
        struct v3d_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
        struct v3d_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
        struct v3d_texture_stateobj tex[PIPE_SHADER_TYPES];
        struct v3d_vertexbuf_stateobj vertexbuf;
        struct v3d_streamout_stateobj streamout;
        struct v3d_bo *current_oq;
        struct pipe_resource *prim_counts;
        uint32_t prim_counts_offset;
        struct v3d_perfmon_state *active_perfmon;
        struct v3d_perfmon_state *last_perfmon;

        struct pipe_query *cond_query;
        bool cond_cond;
        enum pipe_render_cond_flag cond_mode;

        int in_fence_fd;
        /** Handle of the syncobj that holds in_fence_fd for submission. */
        uint32_t in_syncobj;

        struct util_dynarray global_buffers;
        /** @} */
};

struct v3d_rasterizer_state {
        struct pipe_rasterizer_state base;

        float point_size;

        uint8_t depth_offset[9];
        uint8_t depth_offset_z16[9];
};

struct v3d_depth_stencil_alpha_state {
        struct pipe_depth_stencil_alpha_state base;

        enum v3d_ez_state ez_state;

        uint8_t stencil_front[6];
        uint8_t stencil_back[6];
};

struct v3d_blend_state {
        struct pipe_blend_state base;

        /* Per-RT mask of whether blending is enabled. */
        uint8_t blend_enables;
};

#define perf_debug(...) do { \
        if (V3D_DBG(PERF)) \
                fprintf(stderr, __VA_ARGS__); \
        if (unlikely(v3d->base.debug.debug_message)) \
                util_debug_message(&v3d->base.debug, PERF_INFO, __VA_ARGS__); \
} while (0)

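/* Illustrative usage sketch: perf_debug() expands to code that dereferences
 * a local `v3d` context pointer, so callers must have one in scope:
 *
 *    struct v3d_context *v3d = v3d_context(pctx);
 *    perf_debug("Flushing job to avoid a stall\n");  // message is made up
 */
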
static inline struct v3d_context *
v3d_context(struct pipe_context *pcontext)
{
        return (struct v3d_context *)pcontext;
}

static inline struct v3d_sampler_view *
v3d_sampler_view(struct pipe_sampler_view *psview)
{
        return (struct v3d_sampler_view *)psview;
}

static inline struct v3d_sampler_state *
v3d_sampler_state(struct pipe_sampler_state *psampler)
{
        return (struct v3d_sampler_state *)psampler;
}

static inline struct v3d_stream_output_target *
v3d_stream_output_target(struct pipe_stream_output_target *ptarget)
{
        return (struct v3d_stream_output_target *)ptarget;
}

static inline uint32_t
v3d_stream_output_target_get_vertex_count(struct pipe_stream_output_target *ptarget)
{
        return v3d_stream_output_target(ptarget)->recorded_vertex_count;
}

int v3d_get_driver_query_group_info(struct pipe_screen *pscreen,
                                    unsigned index,
                                    struct pipe_driver_query_group_info *info);
int v3d_get_driver_query_info(struct pipe_screen *pscreen, unsigned index,
                              struct pipe_driver_query_info *info);

struct pipe_context *v3d_context_create(struct pipe_screen *pscreen,
                                        void *priv, unsigned flags);
void v3d_program_init(struct pipe_context *pctx);
void v3d_program_fini(struct pipe_context *pctx);
void v3d_query_init(struct pipe_context *pctx);

static inline int
v3d_ioctl(int fd, unsigned long request, void *arg)
{
#if USE_V3D_SIMULATOR
        return v3d_simulator_ioctl(fd, request, arg);
#else
        return drmIoctl(fd, request, arg);
#endif
}

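/* Illustrative sketch (not from the original header): v3d_ioctl() is used
 * exactly like drmIoctl(), which lets real-hardware and simulator builds
 * share call sites, e.g. querying a device parameter:
 *
 *    struct drm_v3d_get_param p = {
 *            .param = DRM_V3D_PARAM_V3D_CORE0_IDENT0,
 *    };
 *    int ret = v3d_ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &p);
 */
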
static inline bool
v3d_transform_feedback_enabled(struct v3d_context *v3d)
{
        return (v3d->prog.bind_vs->num_tf_specs != 0 ||
                (v3d->prog.bind_gs && v3d->prog.bind_gs->num_tf_specs != 0)) &&
               v3d->active_queries;
}

void v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader);
struct v3d_cl_reloc v3d_write_uniforms(struct v3d_context *v3d,
                                       struct v3d_job *job,
                                       struct v3d_compiled_shader *shader,
                                       enum pipe_shader_type stage);

void v3d_flush(struct pipe_context *pctx);
void v3d_job_init(struct v3d_context *v3d);
struct v3d_job *v3d_job_create(struct v3d_context *v3d);
void v3d_job_free(struct v3d_context *v3d, struct v3d_job *job);
struct v3d_job *v3d_get_job(struct v3d_context *v3d,
                            uint32_t nr_cbufs,
                            struct pipe_surface **cbufs,
                            struct pipe_surface *zsbuf,
                            struct pipe_surface *bbuf);
struct v3d_job *v3d_get_job_for_fbo(struct v3d_context *v3d);
void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
void v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_add_tf_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job);
void v3d_flush_jobs_using_bo(struct v3d_context *v3d, struct v3d_bo *bo);
void v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond,
                                     bool is_compute_pipeline);
void v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond,
                                     bool is_compute_pipeline);
void v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode);
void v3d_update_compiled_cs(struct v3d_context *v3d);

bool v3d_rt_format_supported(const struct v3d_device_info *devinfo,
                             enum pipe_format f);
bool v3d_tex_format_supported(const struct v3d_device_info *devinfo,
                              enum pipe_format f);
uint8_t v3d_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_return_size(const struct v3d_device_info *devinfo,
                                enum pipe_format f);
uint8_t v3d_get_tex_return_channels(const struct v3d_device_info *devinfo,
                                    enum pipe_format f);
const uint8_t *v3d_get_format_swizzle(const struct v3d_device_info *devinfo,
                                      enum pipe_format f);
bool v3d_format_supports_tlb_msaa_resolve(const struct v3d_device_info *devinfo,
                                          enum pipe_format f);

void v3d_init_query_functions(struct v3d_context *v3d);
void v3d_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
void v3d_blitter_save(struct v3d_context *v3d, enum v3d_blitter_op op);
bool v3d_generate_mipmap(struct pipe_context *pctx,
                         struct pipe_resource *prsc,
                         enum pipe_format format,
                         unsigned int base_level,
                         unsigned int last_level,
                         unsigned int first_layer,
                         unsigned int last_layer);

void
v3d_fence_unreference(struct v3d_fence **fence);

struct v3d_fence *v3d_fence_create(struct v3d_context *v3d, int fd);

bool v3d_fence_wait(struct v3d_screen *screen,
                    struct v3d_fence *fence,
                    uint64_t timeout_ns);

int v3d_fence_context_init(struct v3d_context *v3d);
void v3d_fence_context_finish(struct v3d_context *v3d);

void v3d_update_primitive_counters(struct v3d_context *v3d);

bool v3d_line_smoothing_enabled(struct v3d_context *v3d);

float v3d_get_real_line_width(struct v3d_context *v3d);

void v3d_ensure_prim_counts_allocated(struct v3d_context *ctx);

void v3d_flag_dirty_sampler_state(struct v3d_context *v3d,
                                  enum pipe_shader_type shader);

void v3d_get_tile_buffer_size(const struct v3d_device_info *devinfo,
                              bool is_msaa,
                              bool double_buffer,
                              uint32_t nr_cbufs,
                              struct pipe_surface **cbufs,
                              struct pipe_surface *bbuf,
                              uint32_t *tile_width,
                              uint32_t *tile_height,
                              uint32_t *max_bpp);

bool v3d_render_condition_check(struct v3d_context *v3d);

#ifdef ENABLE_SHADER_CACHE
struct v3d_compiled_shader *v3d_disk_cache_retrieve(struct v3d_context *v3d,
                                                    const struct v3d_key *key,
                                                    const struct v3d_uncompiled_shader *uncompiled);

void v3d_disk_cache_store(struct v3d_context *v3d,
                          const struct v3d_key *key,
                          const struct v3d_uncompiled_shader *uncompiled,
                          const struct v3d_compiled_shader *shader,
                          uint64_t *qpu_insts,
                          uint32_t qpu_size);
#endif /* ENABLE_SHADER_CACHE */

/* Helper to call hw ver specific functions */
#define v3d_X(devinfo, thing) ({ \
        __typeof(&v3d42_##thing) v3d_X_thing; \
        switch (devinfo->ver) { \
        case 42: \
                v3d_X_thing = &v3d42_##thing; \
                break; \
        case 71: \
                v3d_X_thing = &v3d71_##thing; \
                break; \
        default: \
                unreachable("Unsupported hardware generation"); \
        } \
        v3d_X_thing; \
})

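/* Illustrative sketch (hypothetical function name): a call through v3d_X()
 * resolves to the per-generation implementation, v3d42_foo() or v3d71_foo(),
 * based on devinfo->ver, and the result is then called like the function
 * itself:
 *
 *    v3d_X(devinfo, emit_some_state)(v3d);
 */
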
#ifdef v3dX
# include "v3dx_context.h"
#else
# define v3dX(x) v3d42_##x
# include "v3dx_context.h"
# undef v3dX

# define v3dX(x) v3d71_##x
# include "v3dx_context.h"
# undef v3dX
#endif

#endif /* V3D_CONTEXT_H */