/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "anv_private.h"
#include "anv_measure.h"

/* These are defined in anv_private.h and blorp_genX_exec_brw.h */
#undef __gen_address_type
#undef __gen_user_data
#undef __gen_combine_address

#include "common/intel_l3_config.h"
#include "blorp/blorp_genX_exec_brw.h"

#include "ds/intel_tracepoints.h"

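/* Hook BLORP operations into ANV's instrumentation: open a u_trace
 * tracepoint and take an intel_measure snapshot before any commands for the
 * operation are emitted.
 */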
static void blorp_measure_start(struct blorp_batch *_batch,
                                const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = _batch->driver_batch;
   trace_intel_begin_blorp(&cmd_buffer->trace);
   anv_measure_snapshot(cmd_buffer,
                        blorp_op_to_intel_measure_snapshot(params->op),
                        NULL, 0);
}

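/* Close the instrumentation region opened in blorp_measure_start(),
 * recording the operation, its extent, sample count, pipeline, formats, and
 * whether the batch is predicated.
 */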
static void blorp_measure_end(struct blorp_batch *_batch,
                              const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = _batch->driver_batch;
   trace_intel_end_blorp(&cmd_buffer->trace,
                         params->op,
                         params->x1 - params->x0,
                         params->y1 - params->y0,
                         params->num_samples,
                         params->shader_pipeline,
                         params->dst.view.format,
                         params->src.view.format,
                         (_batch->flags & BLORP_BATCH_PREDICATE_ENABLE));
}

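/* BLORP core callback: reserve space for n dwords in the command buffer's
 * batch and return a CPU pointer for BLORP to pack commands into.
 */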
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return anv_batch_emit_dwords(&cmd_buffer->batch, n);
}

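/* ANV runs with softpin, so a "relocation" reduces to tracking the BO in the
 * batch's reloc list (for residency) and returning the final 64-bit GPU
 * address; nothing gets patched at submit time.
 */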
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   struct anv_address anv_addr = {
      .bo = address.buffer,
      .offset = address.offset,
   };
   anv_reloc_list_add_bo(cmd_buffer->batch.relocs, anv_addr.bo);
   return anv_address_physical(anv_address_add(anv_addr, delta));
}

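/* Record the BO referenced by a surface state so it is made resident at
 * submit time.  ss_offset and delta are unused here; an allocation failure
 * is flagged on the batch rather than returned.
 */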
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   VkResult result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
                                           address.buffer);
   if (unlikely(result != VK_SUCCESS))
      anv_batch_set_error(&cmd_buffer->batch, result);
}

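/* With softpin there is no separate surface relocation step: return the
 * final 64-bit GPU address to be written into the RENDER_SURFACE_STATE.
 */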
static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address address)
{
   struct anv_address anv_addr = {
      .bo = address.buffer,
      .offset = address.offset,
   };
   return anv_address_physical(anv_addr);
}

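/* On Gfx9, binding table entries are offsets from the surface state base
 * address, so BLORP needs to know which pool that base points at.  The
 * negative start_offset presumably compensates for where the base address is
 * programmed within the block pool (see the STATE_BASE_ADDRESS setup).
 */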
#if GFX_VER == 9
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = cmd_buffer->device->internal_surface_state_pool.block_pool.bo,
      .offset = -cmd_buffer->device->internal_surface_state_pool.start_offset,
   };
}
#endif

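/* Return the offset of one of the dynamic states that ANV pre-bakes at
 * device initialization (see genX(blorp_init_dynamic_states) at the bottom
 * of this file).
 */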
static uint32_t
blorp_get_dynamic_state(struct blorp_batch *batch,
                        enum blorp_dynamic_state name)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return cmd_buffer->device->blorp.dynamic_states[name].offset;
}

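/* Suballocate from the command buffer's dynamic state stream, returning a
 * CPU map and the dynamic-state-relative offset.  A typical use by the BLORP
 * core looks roughly like this (illustrative sketch, not code from this
 * file; "size" and "cc_state" are hypothetical):
 *
 *    uint32_t offset;
 *    void *map = blorp_alloc_dynamic_state(batch, size, 64, &offset);
 *    if (map != NULL)
 *       memcpy(map, cc_state, size);
 *    // "offset" is then packed into a 3DSTATE_*_POINTERS command.
 */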
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_general_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

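/* Allocate a binding table plus one surface state per entry.  Each binding
 * table slot holds the surface state offset biased by the state_offset the
 * allocator returns.  Returns false on allocation failure so the BLORP core
 * can abort the operation on an already-errored batch.
 */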
static bool
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets, void **surface_maps)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, num_entries,
                                               &state_offset, &bt_state);
   if (result != VK_SUCCESS)
      return false;

   uint32_t *bt_map = bt_state.map;
   *bt_offset = bt_state.offset;

   for (unsigned i = 0; i < num_entries; i++) {
      struct anv_state surface_state =
         anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
      if (surface_state.map == NULL)
         return false;

      bt_map[i] = surface_state.offset + state_offset;
      surface_offsets[i] = surface_state.offset;
      surface_maps[i] = surface_state.map;
   }

   return true;
}

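/* ANV programs binding table pointers as offsets directly, so this is the
 * identity mapping.
 */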
static uint32_t
blorp_binding_table_offset_to_pointer(struct blorp_batch *batch,
                                      uint32_t offset)
{
   return offset;
}

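/* Allocate a 64-byte aligned chunk of temporary state for BLORP's vertex
 * data and describe it as a blorp_address, including the MOCS value ISL
 * picks for vertex buffer usage.
 */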
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   struct anv_state vb_state =
      anv_cmd_buffer_alloc_temporary_state(cmd_buffer, size, 64);
   struct anv_address vb_addr =
      anv_cmd_buffer_temporary_state_address(cmd_buffer, vb_state);

   *addr = (struct blorp_address) {
      .buffer = vb_addr.bo,
      .offset = vb_addr.offset,
      .mocs = isl_mocs(&cmd_buffer->device->isl_dev,
                       ISL_SURF_USAGE_VERTEX_BUFFER_BIT, false),
   };

   return vb_state.map;
}

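/* Gfx9 only: the VF cache keys entries on the low 32 bits of vertex buffer
 * addresses, so a transition in the upper address bits between draws
 * requires a VF cache invalidate.  Reuse ANV's per-binding tracking to get
 * the flush emitted before the upcoming 3DPRIMITIVE.
 */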
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
                                           const struct blorp_address *addrs,
                                           uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GFX_VER == 9
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct anv_address anv_addr = {
         .bo = addrs[i].buffer,
         .offset = addrs[i].offset,
      };
      genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer,
                                                     i, anv_addr, sizes[i]);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   /* Technically, we should call this *after* 3DPRIMITIVE but it doesn't
    * really matter for blorp because we never call apply_pipe_flushes after
    * this point.
    */
   genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       (1 << num_vbs) - 1);
#endif
}

UNUSED static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = cmd_buffer->device->workaround_address.bo,
      .offset = cmd_buffer->device->workaround_address.offset,
   };
}

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* We don't need to flush states anymore, since everything will be
    * snooped.
    */
}

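/* Run the URB workaround before BLORP reprograms the URB, then mirror the
 * new configuration into ANV's own state tracking so later draws see it.
 */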
static void
blorp_pre_emit_urb_config(struct blorp_batch *blorp_batch,
                          struct intel_urb_config *urb_cfg)
{
   struct anv_cmd_buffer *cmd_buffer = blorp_batch->driver_batch;
   genX(urb_workaround)(cmd_buffer, urb_cfg);

   /* Update urb config. */
   memcpy(&cmd_buffer->state.gfx.urb_cfg, urb_cfg,
          sizeof(struct intel_urb_config));
}

static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return cmd_buffer->state.current_l3_config;
}

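/* Execute a BLORP operation on the render engine.  This sets up the bits of
 * hardware state BLORP does not manage itself (hashing mode, workarounds,
 * pipeline select, PMA fix), runs the operation, and then marks every piece
 * of state BLORP may have clobbered as dirty so the next real draw re-emits
 * it.
 */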
static void
blorp_exec_on_render(struct blorp_batch *batch,
                     const struct blorp_params *params)
{
   assert((batch->flags & BLORP_BATCH_USE_COMPUTE) == 0);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT);

   struct anv_gfx_dynamic_state *hw_state =
      &cmd_buffer->state.gfx.dyn_state;

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, params->x1 - params->x0,
                                      params->y1 - params->y0, scale);

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (blorp_uses_bti_rt_writes(batch, params)) {
      anv_add_pending_pipe_bits(cmd_buffer,
                                ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                                ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
                                "before blorp BTI change");
   }
#endif

#if INTEL_WA_18019816803_GFX_VER
   /* Check if blorp ds state matches ours. */
   if (intel_needs_workaround(cmd_buffer->device->info, 18019816803)) {
      bool blorp_ds_state = params->depth.enabled || params->stencil.enabled;
      if (hw_state->ds_write_state != blorp_ds_state) {
         /* Flag the change in ds_write_state so that the next pipeline use
          * will trigger a PIPE_CONTROL too.
          */
         hw_state->ds_write_state = blorp_ds_state;
         BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WA_18019816803);

         /* Add the stall that genX(cmd_buffer_apply_pipe_flushes) will emit
          * prior to the blorp operation.
          */
         anv_add_pending_pipe_bits(cmd_buffer,
                                   ANV_PIPE_PSS_STALL_SYNC_BIT,
                                   "Wa_18019816803");
      }
   }
#endif

#if INTEL_WA_14018283232_GFX_VER
   genX(cmd_buffer_ensure_wa_14018283232)(cmd_buffer, false);
#endif

#if INTEL_WA_18038825448_GFX_VER
   if (genX(cmd_buffer_set_coarse_pixel_active)
       (cmd_buffer, ANV_COARSE_PIXEL_STATE_DISABLED)) {
      batch->flags |= BLORP_BATCH_FORCE_CPS_DEPENDENCY;
   }
#endif

   if (params->depth.enabled &&
       !(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
      genX(cmd_buffer_emit_gfx12_depth_wa)(cmd_buffer, &params->depth.surf);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   /* Wa_14015814527 */
   genX(apply_task_urb_workaround)(cmd_buffer);

   /* Apply any outstanding flushes in case the pipeline select hasn't. */
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   /* BLORP doesn't do anything fancy with depth such as discards, so we want
    * the PMA fix off.  Also, off is always the safe option.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   blorp_exec(batch, params);

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (blorp_uses_bti_rt_writes(batch, params)) {
      anv_add_pending_pipe_bits(cmd_buffer,
                                ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
                                ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
                                "after blorp BTI change");
   }
#endif

   /* Flag all the instructions emitted by BLORP. */
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_URB);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_STATISTICS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_TOPOLOGY);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VERTEX_INPUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS);
#if GFX_VER >= 11
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS_2);
#endif
#if GFX_VER >= 12
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PRIMITIVE_REPLICATION);
#endif
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VIEWPORT_CC_PTR);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_STREAMOUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_RASTER);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_CLIP);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SAMPLE_MASK);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MULTISAMPLE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SBE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SBE_SWIZ);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DEPTH_BOUNDS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM_DEPTH_STENCIL);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_HS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_GS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_EXTRA);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_BLEND_STATE_PTR);
   if (batch->blorp->config.use_mesh_shading) {
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MESH_CONTROL);
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TASK_CONTROL);
   }
   if (params->wm_prog_data) {
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_CC_STATE_PTR);
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_BLEND);
   }

   anv_cmd_dirty_mask_t dirty = ~(ANV_CMD_DIRTY_INDEX_BUFFER |
                                  ANV_CMD_DIRTY_XFB_ENABLE |
                                  ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE |
                                  ANV_CMD_DIRTY_FS_MSAA_FLAGS |
                                  ANV_CMD_DIRTY_RESTART_INDEX |
                                  ANV_CMD_DIRTY_COARSE_PIXEL_ACTIVE);

   cmd_buffer->state.gfx.vb_dirty = ~0;
   cmd_buffer->state.gfx.dirty |= dirty;
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}

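/* Execute a BLORP operation on a compute-capable queue: switch to the GPGPU
 * pipeline, apply pending flushes, run the operation, and dirty the compute
 * state that BLORP may have clobbered.
 */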
static void
blorp_exec_on_compute(struct blorp_batch *batch,
                      const struct blorp_params *params)
{
   assert(batch->flags & BLORP_BATCH_USE_COMPUTE);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_COMPUTE_BIT);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   /* Apply any outstanding flushes in case the pipeline select hasn't. */
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   blorp_exec(batch, params);

   cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   cmd_buffer->state.compute.pipeline_dirty = true;
}

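/* Blitter (copy engine) operations need no extra pipeline setup on our side;
 * just run the operation.
 */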
static void
blorp_exec_on_blitter(struct blorp_batch *batch,
                      const struct blorp_params *params)
{
   assert(batch->flags & BLORP_BATCH_USE_BLITTER);

   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(anv_cmd_buffer_is_blitter_queue(cmd_buffer));

   blorp_exec(batch, params);
}

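/* Map a BLORP operation onto the isl_aux_op it performs on a color surface,
 * so that genX(cmd_buffer_update_color_aux_op) can flush or invalidate
 * between incompatible auxiliary operations.
 */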
static enum isl_aux_op
get_color_aux_op(const struct blorp_params *params)
{
   switch (params->op) {
   case BLORP_OP_CCS_RESOLVE:
   case BLORP_OP_CCS_PARTIAL_RESOLVE:
   case BLORP_OP_CCS_COLOR_CLEAR:
   case BLORP_OP_MCS_COLOR_CLEAR:
      assert(params->fast_clear_op != ISL_AUX_OP_NONE);
      return params->fast_clear_op;

   /* Some auxiliary surface operations are not provided by hardware. To
    * provide that functionality, BLORP sometimes tries to emulate what
    * hardware would do with custom pixel shaders. For now, we assume that
    * BLORP's implementation has the same cache invalidation and flushing
    * requirements as similar hardware operations.
    */
   case BLORP_OP_CCS_AMBIGUATE:
      assert(GFX_VER >= 11 || params->fast_clear_op == ISL_AUX_OP_NONE);
      return ISL_AUX_OP_AMBIGUATE;
   case BLORP_OP_MCS_AMBIGUATE:
      assert(params->fast_clear_op == ISL_AUX_OP_NONE);
      return ISL_AUX_OP_AMBIGUATE;
   case BLORP_OP_MCS_PARTIAL_RESOLVE:
      assert(params->fast_clear_op == ISL_AUX_OP_NONE);
      return ISL_AUX_OP_PARTIAL_RESOLVE;

   /* If memory aliasing is being done on an image, a pending fast clear
    * could hit the destination address at an unknown time. Go back to the
    * regular drawing mode to avoid this case.
    */
   case BLORP_OP_HIZ_AMBIGUATE:
   case BLORP_OP_HIZ_CLEAR:
   case BLORP_OP_HIZ_RESOLVE:
   case BLORP_OP_SLOW_DEPTH_CLEAR:
      assert(params->fast_clear_op == ISL_AUX_OP_NONE);
      return ISL_AUX_OP_NONE;

   /* The remaining operations are considered regular draws. */
   case BLORP_OP_SLOW_COLOR_CLEAR:
   case BLORP_OP_BLIT:
   case BLORP_OP_COPY:
      assert(params->fast_clear_op == ISL_AUX_OP_NONE);
      return ISL_AUX_OP_NONE;
   }

   unreachable("Invalid value in params->op");
}

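/* Driver entry point invoked by the BLORP core for every operation.  A
 * caller inside ANV typically reaches this through the public BLORP API,
 * roughly like this (illustrative sketch; see blorp.h for the actual API):
 *
 *    struct blorp_batch batch;
 *    blorp_batch_init(&cmd_buffer->device->blorp.context, &batch,
 *                     cmd_buffer, 0);
 *    blorp_copy(&batch, &src, src_level, src_layer,
 *               &dst, dst_level, dst_layer,
 *               src_x, src_y, dst_x, dst_y, width, height);
 *    blorp_batch_finish(&batch);
 */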
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   /* Turn on preemption if it was toggled off. */
   if (!cmd_buffer->state.gfx.object_preemption)
      genX(cmd_buffer_set_preemption)(cmd_buffer, true);

   if (!cmd_buffer->state.current_l3_config) {
      const struct intel_l3_config *cfg =
         intel_get_default_l3_config(cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   /* Flush any in-progress CCS/MCS operations as needed. */
   const enum isl_aux_op aux_op = get_color_aux_op(params);
   genX(cmd_buffer_update_color_aux_op(cmd_buffer, aux_op));

   if (batch->flags & BLORP_BATCH_USE_BLITTER)
      blorp_exec_on_blitter(batch, params);
   else if (batch->flags & BLORP_BATCH_USE_COMPUTE)
      blorp_exec_on_compute(batch, params);
   else
      blorp_exec_on_render(batch, params);
}

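/* Per-draw hook called by the BLORP core before each 3DPRIMITIVE it emits:
 * open the measurement region and arm the debug breakpoint.
 */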
static void
blorp_emit_pre_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   blorp_measure_start(batch, params);
   genX(emit_breakpoint)(&cmd_buffer->batch, cmd_buffer->device, true);
}

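/* Mirror of blorp_emit_pre_draw(): emit the post-3DPRIMITIVE workarounds for
 * the RECTLIST topology, clear the breakpoint, and close the measurement
 * region.
 */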
static void
blorp_emit_post_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   genX(batch_emit_post_3dprimitive_was)(&cmd_buffer->batch,
                                         cmd_buffer->device,
                                         _3DPRIM_RECTLIST,
                                         3);

   genX(emit_breakpoint)(&cmd_buffer->batch, cmd_buffer->device, false);
   blorp_measure_end(batch, params);
}

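/* Expose the genX-compiled blorp_init_dynamic_states() from
 * blorp_genX_exec_brw.h so ANV can pre-bake BLORP's dynamic states at device
 * initialization (consumed above in blorp_get_dynamic_state()).
 */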
void
genX(blorp_init_dynamic_states)(struct blorp_context *context)
{
   blorp_init_dynamic_states(context);
}