/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based on si_state.c
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "radv_buffer.h"
#include "radv_cs.h"
#include "radv_debug.h"
#include "radv_shader.h"
#include "radv_sqtt.h"
#include "sid.h"

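/* Emit an end-of-pipe event that (optionally) writes a fence value to memory.
 * On the SDMA (transfer) queue this is a plain fence packet; on GFX/compute
 * queues it becomes EVENT_WRITE_EOP/EVENT_WRITE_EOS or RELEASE_MEM depending
 * on the generation and queue, including the GFX9 EOP-bug workaround below.
 */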
void
radv_cs_emit_write_event_eop(struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level, enum radv_queue_family qf,
                             unsigned event, unsigned event_flags, unsigned dst_sel, unsigned data_sel, uint64_t va,
                             uint32_t new_fence, uint64_t gfx9_eop_bug_va)
{
   if (qf == RADV_QUEUE_TRANSFER) {
      radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, SDMA_FENCE_MTYPE_UC));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      radeon_emit(cs, new_fence);
      return;
   }

   const bool is_mec = qf == RADV_QUEUE_COMPUTE && gfx_level >= GFX7;
   unsigned op =
      EVENT_TYPE(event) | EVENT_INDEX(event == V_028A90_CS_DONE || event == V_028A90_PS_DONE ? 6 : 5) | event_flags;
   unsigned is_gfx8_mec = is_mec && gfx_level < GFX9;
   unsigned sel = EOP_DST_SEL(dst_sel) | EOP_DATA_SEL(data_sel);

   /* Wait for write confirmation before writing data, but don't send
    * an interrupt. */
   if (data_sel != EOP_DATA_SEL_DISCARD)
      sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

   if (gfx_level >= GFX9 || is_gfx8_mec) {
      /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
       * counters) must immediately precede every timestamp event to
       * prevent a GPU hang on GFX9.
       */
      if (gfx_level == GFX9 && !is_mec) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
         radeon_emit(cs, gfx9_eop_bug_va);
         radeon_emit(cs, gfx9_eop_bug_va >> 32);
      }

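      /* GFX8 MEC uses a RELEASE_MEM encoding that is one dword shorter,
       * hence the count of 5 vs 6 and the skipped trailing dword below.
       */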
      radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
      radeon_emit(cs, op);
      radeon_emit(cs, sel);
      radeon_emit(cs, va);        /* address lo */
      radeon_emit(cs, va >> 32);  /* address hi */
      radeon_emit(cs, new_fence); /* immediate data lo */
      radeon_emit(cs, 0);         /* immediate data hi */
      if (!is_gfx8_mec)
         radeon_emit(cs, 0); /* unused */
   } else {
      /* On GFX6, EOS events are always emitted with EVENT_WRITE_EOS.
       * On GFX7+, EOS events are emitted with EVENT_WRITE_EOS on
       * the graphics queue, and with RELEASE_MEM on the compute
       * queue.
       */
      if (event == V_028B9C_CS_DONE || event == V_028B9C_PS_DONE) {
         assert(event_flags == 0 && dst_sel == EOP_DST_SEL_MEM && data_sel == EOP_DATA_SEL_VALUE_32BIT);

         if (is_mec) {
            radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, false));
            radeon_emit(cs, op);
            radeon_emit(cs, sel);
            radeon_emit(cs, va);        /* address lo */
            radeon_emit(cs, va >> 32);  /* address hi */
            radeon_emit(cs, new_fence); /* immediate data lo */
            radeon_emit(cs, 0);         /* immediate data hi */
         } else {
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, false));
            radeon_emit(cs, op);
            radeon_emit(cs, va);
            radeon_emit(cs, ((va >> 32) & 0xffff) | EOS_DATA_SEL(EOS_DATA_SEL_VALUE_32BIT));
            radeon_emit(cs, new_fence);
         }
      } else {
         if (gfx_level == GFX7 || gfx_level == GFX8) {
            /* Two EOP events are required to make all
             * engines go idle (and optional cache flushes
             * executed) before the timestamp is written.
             */
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
            radeon_emit(cs, op);
            radeon_emit(cs, va);
            radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
            radeon_emit(cs, 0); /* immediate data */
            radeon_emit(cs, 0); /* unused */
         }

         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
         radeon_emit(cs, op);
         radeon_emit(cs, va);
         radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
         radeon_emit(cs, new_fence); /* immediate data */
         radeon_emit(cs, 0);         /* unused */
      }
   }
}

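/* Emit a full-range cache-maintenance packet: ACQUIRE_MEM on compute rings
 * and on GFX9 gfx rings, SURFACE_SYNC on older gfx rings. cp_coher_cntl
 * selects which caches to flush/invalidate.
 */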
static void
radv_emit_acquire_mem(struct radeon_cmdbuf *cs, bool is_mec, bool is_gfx9, unsigned cp_coher_cntl)
{
   if (is_mec || is_gfx9) {
      uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) | PKT3_SHADER_TYPE_S(is_mec));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, hi_val);        /* CP_COHER_SIZE_HI */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* ACQUIRE_MEM is only required on a compute ring; older gfx rings use
       * SURFACE_SYNC instead. */
      radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   }
}

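/* GFX10+ cache-flush path. Cache maintenance is expressed via GCR_CNTL;
 * CB/DB flushes use an EOP timestamp event that is waited on either with
 * WAIT_REG_MEM on a fence value (GFX10) or via the PWS mechanism (GFX11+).
 */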
static void
gfx10_cs_emit_cache_flush(struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level, uint32_t *flush_cnt,
                          uint64_t flush_va, enum radv_queue_family qf, enum radv_cmd_flush_bits flush_bits,
                          enum rgp_flush_bits *sqtt_flush_bits, uint64_t gfx9_eop_bug_va)
{
   const bool is_mec = qf == RADV_QUEUE_COMPUTE;
   uint32_t gcr_cntl = 0;
   unsigned cb_db_event = 0;

   /* This flush bit should never be requested on GFX10+. */
   assert(!(flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC));

   if (flush_bits & RADV_CMD_FLAG_INV_ICACHE) {
      gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);

      *sqtt_flush_bits |= RGP_FLUSH_INVAL_ICACHE;
   }
   if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
      /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
       * to FORWARD when both L1 and L2 are written out (WB or INV).
       */
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);

      *sqtt_flush_bits |= RGP_FLUSH_INVAL_SMEM_L0;
   }
   if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);

      *sqtt_flush_bits |= RGP_FLUSH_INVAL_VMEM_L0 | RGP_FLUSH_INVAL_L1;
   }
   if (flush_bits & RADV_CMD_FLAG_INV_L2) {
      /* Writeback and invalidate everything in L2. */
      gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | (gfx_level < GFX12 ? S_586_GLM_INV(1) | S_586_GLM_WB(1) : 0);

      *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2;
   } else if (flush_bits & RADV_CMD_FLAG_WB_L2) {
      /* Writeback but do not invalidate.
       * GLM doesn't support WB alone. If WB is set, INV must be set too.
       */
      gcr_cntl |= S_586_GL2_WB(1) | (gfx_level < GFX12 ? S_586_GLM_WB(1) | S_586_GLM_INV(1) : 0);

      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_L2;
   } else if (flush_bits & RADV_CMD_FLAG_INV_L2_METADATA) {
      assert(gfx_level < GFX12);
      gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
   }

   if (flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
      /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_CB_META */
      if (gfx_level < GFX12 && flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
         /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));

         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
      }

      /* GFX11 can't flush DB_META and should use a TS event instead. */
      /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_DB_META ? */
      if (gfx_level < GFX12 && gfx_level != GFX11 && (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
         /* Flush HTILE. Will wait for idle later. */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));

         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
      }

      /* First flush CB/DB, then L1/L2. */
      gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);

      if ((flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) ==
          (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
      } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
         if (gfx_level == GFX11) {
            cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
         } else {
            cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
         }
      } else {
         assert(0);
      }
   } else {
      /* Wait for graphics shaders to go idle if requested. */
      if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

         *sqtt_flush_bits |= RGP_FLUSH_PS_PARTIAL_FLUSH;
      } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

         *sqtt_flush_bits |= RGP_FLUSH_VS_PARTIAL_FLUSH;
      }
   }

   if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

      *sqtt_flush_bits |= RGP_FLUSH_CS_PARTIAL_FLUSH;
   }

   if (cb_db_event) {
      if (gfx_level >= GFX11) {
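         /* GFX11+ can use PWS (pixel wait sync): the RELEASE_MEM below
          * carries the GCR flush and the PWS-enabled ACQUIRE_MEM waits on
          * its timestamp directly, so no fence value is written to memory.
          */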
         /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
         unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
         unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
         unsigned glk_wb = G_586_GLK_WB(gcr_cntl);
         unsigned glk_inv = G_586_GLK_INV(gcr_cntl);
         unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
         unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
         assert(G_586_GL2_US(gcr_cntl) == 0);
         assert(G_586_GL2_RANGE(gcr_cntl) == 0);
         assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
         unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
         unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
         unsigned gcr_seq = G_586_SEQ(gcr_cntl);

         gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLK_WB & C_586_GLK_INV & C_586_GLV_INV & C_586_GL1_INV &
                     C_586_GL2_INV & C_586_GL2_WB; /* keep SEQ */

         /* Send an event that flushes caches. */
         radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
         radeon_emit(cs, S_490_EVENT_TYPE(cb_db_event) | S_490_EVENT_INDEX(5) | S_490_GLM_WB(glm_wb) |
                            S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) | S_490_GL1_INV(gl1_inv) |
                            S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) | S_490_SEQ(gcr_seq) | S_490_GLK_WB(glk_wb) |
                            S_490_GLK_INV(glk_inv) | S_490_PWS_ENABLE(1));
         radeon_emit(cs, 0); /* DST_SEL, INT_SEL, DATA_SEL */
         radeon_emit(cs, 0); /* ADDRESS_LO */
         radeon_emit(cs, 0); /* ADDRESS_HI */
         radeon_emit(cs, 0); /* DATA_LO */
         radeon_emit(cs, 0); /* DATA_HI */
         radeon_emit(cs, 0); /* INT_CTXID */

         /* Wait for the event and invalidate remaining caches if needed. */
         radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
         radeon_emit(cs, S_580_PWS_STAGE_SEL(V_580_CP_PFP) | S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) |
                            S_580_PWS_ENA2(1) | S_580_PWS_COUNT(0));
         radeon_emit(cs, 0xffffffff); /* GCR_SIZE */
         radeon_emit(cs, 0x01ffffff); /* GCR_SIZE_HI */
         radeon_emit(cs, 0);          /* GCR_BASE_LO */
         radeon_emit(cs, 0);          /* GCR_BASE_HI */
         radeon_emit(cs, S_585_PWS_ENA(1));
         radeon_emit(cs, gcr_cntl); /* GCR_CNTL */

         gcr_cntl = 0; /* all done */
      } else {
         /* CB/DB flush and invalidate (or possibly just a wait for a
          * meta flush) via RELEASE_MEM.
          *
          * Combine this with other cache flushes when possible; this
          * requires affected shaders to be idle, so do it after the
          * CS_PARTIAL_FLUSH above (VS/PS partial flushes are always
          * implied).
          */
         /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
         unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
         unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
         unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
         unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
         assert(G_586_GL2_US(gcr_cntl) == 0);
         assert(G_586_GL2_RANGE(gcr_cntl) == 0);
         assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
         unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
         unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
         unsigned gcr_seq = G_586_SEQ(gcr_cntl);

         gcr_cntl &=
            C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV & C_586_GL2_WB; /* keep SEQ */

         assert(flush_cnt);
         (*flush_cnt)++;

         radv_cs_emit_write_event_eop(cs, gfx_level, qf, cb_db_event,
                                      S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
                                         S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
                                         S_490_SEQ(gcr_seq),
                                      EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, flush_va, *flush_cnt, gfx9_eop_bug_va);

         radv_cp_wait_mem(cs, qf, WAIT_REG_MEM_EQUAL, flush_va, *flush_cnt, 0xffffffff);
      }
   }

   /* VGT state sync */
   if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }

   /* Ignore fields that only modify the behavior of other fields. */
   if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
      /* Flush caches and wait for the caches to assert idle.
       * The cache flush is executed in the ME, but the PFP waits
       * for completion.
       */
      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      radeon_emit(cs, 0);          /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
      radeon_emit(cs, 0xffffff);   /* CP_COHER_SIZE_HI */
      radeon_emit(cs, 0);          /* CP_COHER_BASE */
      radeon_emit(cs, 0);          /* CP_COHER_BASE_HI */
      radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
      radeon_emit(cs, gcr_cntl);   /* GCR_CNTL */
   } else if ((cb_db_event || (flush_bits & (RADV_CMD_FLAG_VS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                             RADV_CMD_FLAG_CS_PARTIAL_FLUSH))) &&
              !is_mec) {
      /* We need to ensure that PFP waits as well. */
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);

      *sqtt_flush_bits |= RGP_FLUSH_PFP_SYNC_ME;
   }

   if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
      if (qf == RADV_QUEUE_GENERAL) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      } else if (qf == RADV_QUEUE_COMPUTE) {
         radeon_set_sh_reg(cs, R_00B828_COMPUTE_PIPELINESTAT_ENABLE, S_00B828_PIPELINESTAT_ENABLE(1));
      }
   } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
      if (qf == RADV_QUEUE_GENERAL) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      } else if (qf == RADV_QUEUE_COMPUTE) {
         radeon_set_sh_reg(cs, R_00B828_COMPUTE_PIPELINESTAT_ENABLE, S_00B828_PIPELINESTAT_ENABLE(0));
      }
   }
}

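/* Emit all cache flushes/invalidations and waits requested in flush_bits.
 * GFX10+ is dispatched to gfx10_cs_emit_cache_flush() above; the code below
 * implements the GFX6-GFX9 path with CP_COHER_CNTL, EOP events and
 * SURFACE_SYNC/ACQUIRE_MEM.
 */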
void
radv_cs_emit_cache_flush(struct radeon_winsys *ws, struct radeon_cmdbuf *cs, enum amd_gfx_level gfx_level,
                         uint32_t *flush_cnt, uint64_t flush_va, enum radv_queue_family qf,
                         enum radv_cmd_flush_bits flush_bits, enum rgp_flush_bits *sqtt_flush_bits,
                         uint64_t gfx9_eop_bug_va)
{
   unsigned cp_coher_cntl = 0;
   uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB);

   radeon_check_space(ws, cs, 128);

   if (gfx_level >= GFX10) {
      /* GFX10 cache flush handling is quite different. */
      gfx10_cs_emit_cache_flush(cs, gfx_level, flush_cnt, flush_va, qf, flush_bits, sqtt_flush_bits, gfx9_eop_bug_va);
      return;
   }

   const bool is_mec = qf == RADV_QUEUE_COMPUTE && gfx_level >= GFX7;

   if (flush_bits & RADV_CMD_FLAG_INV_ICACHE) {
      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
      *sqtt_flush_bits |= RGP_FLUSH_INVAL_ICACHE;
   }
   if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
      *sqtt_flush_bits |= RGP_FLUSH_INVAL_SMEM_L0;
   }

   if (gfx_level <= GFX8) {
      if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) | S_0085F0_CB1_DEST_BASE_ENA(1) |
                          S_0085F0_CB2_DEST_BASE_ENA(1) | S_0085F0_CB3_DEST_BASE_ENA(1) |
                          S_0085F0_CB4_DEST_BASE_ENA(1) | S_0085F0_CB5_DEST_BASE_ENA(1) |
                          S_0085F0_CB6_DEST_BASE_ENA(1) | S_0085F0_CB7_DEST_BASE_ENA(1);

         /* Necessary for DCC */
         if (gfx_level >= GFX8) {
            radv_cs_emit_write_event_eop(cs, gfx_level, qf, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
                                         EOP_DATA_SEL_DISCARD, 0, 0, gfx9_eop_bug_va);
         }

         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
      }
      if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);

         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
      }
   }

   if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));

      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB;
   }

   if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));

      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;
   }

   if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

      *sqtt_flush_bits |= RGP_FLUSH_PS_PARTIAL_FLUSH;
   } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

      *sqtt_flush_bits |= RGP_FLUSH_VS_PARTIAL_FLUSH;
   }

   if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));

      *sqtt_flush_bits |= RGP_FLUSH_CS_PARTIAL_FLUSH;
   }

   if (gfx_level == GFX9 && flush_cb_db) {
      unsigned cb_db_event, tc_flags;

      /* Set the CB/DB flush event. */
      cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;

      /* These are the only allowed combinations. If you need to
       * do multiple operations at once, do them separately.
       * All operations that invalidate L2 also seem to invalidate
       * metadata. Volatile (VOL) and WC flushes are not listed here.
       *
       * TC    | TC_WB         = writeback & invalidate L2
       * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
       *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
       * TC            | TC_NC = invalidate L2 for MTYPE == NC
       * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
       * TCL1                  = invalidate L1
       */
      tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;

      *sqtt_flush_bits |= RGP_FLUSH_FLUSH_CB | RGP_FLUSH_INVAL_CB | RGP_FLUSH_FLUSH_DB | RGP_FLUSH_INVAL_DB;

      /* Ideally flush TC together with CB/DB. */
      if (flush_bits & RADV_CMD_FLAG_INV_L2) {
         /* Writeback and invalidate everything in L2. */
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;

         /* Clear the flags. */
         flush_bits &= ~(RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_WB_L2);

         *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2;
      }

      assert(flush_cnt);
      (*flush_cnt)++;

      radv_cs_emit_write_event_eop(cs, gfx_level, qf, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
                                   EOP_DATA_SEL_VALUE_32BIT, flush_va, *flush_cnt, gfx9_eop_bug_va);
      radv_cp_wait_mem(cs, qf, WAIT_REG_MEM_EQUAL, flush_va, *flush_cnt, 0xffffffff);
   }

   /* VGT state sync */
   if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }

   /* VGT streamout state sync */
   if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
   }

   /* Make sure ME is idle (it executes most packets) before continuing.
    * This prevents read-after-write hazards between PFP and ME.
    */
   if ((cp_coher_cntl || (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                                        RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_WB_L2))) &&
       !is_mec) {
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);

      *sqtt_flush_bits |= RGP_FLUSH_PFP_SYNC_ME;
   }

   if ((flush_bits & RADV_CMD_FLAG_INV_L2) || (gfx_level <= GFX7 && (flush_bits & RADV_CMD_FLAG_WB_L2))) {
      radv_emit_acquire_mem(cs, is_mec, gfx_level == GFX9,
                            cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(gfx_level >= GFX8));
      cp_coher_cntl = 0;

      *sqtt_flush_bits |= RGP_FLUSH_INVAL_L2 | RGP_FLUSH_INVAL_VMEM_L0;
   } else {
      if (flush_bits & RADV_CMD_FLAG_WB_L2) {
         /* WB = write-back
          * NC = apply to non-coherent MTYPEs
          *      (i.e. MTYPE <= 1, which is what we use everywhere)
          *
          * WB doesn't work without NC.
          */
         radv_emit_acquire_mem(cs, is_mec, gfx_level == GFX9,
                               cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
         cp_coher_cntl = 0;

         *sqtt_flush_bits |= RGP_FLUSH_FLUSH_L2 | RGP_FLUSH_INVAL_VMEM_L0;
      }
      if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
         radv_emit_acquire_mem(cs, is_mec, gfx_level == GFX9, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
         cp_coher_cntl = 0;

         *sqtt_flush_bits |= RGP_FLUSH_INVAL_VMEM_L0;
      }
   }

   /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
    * Therefore, it should be last. Done in PFP.
    */
   if (cp_coher_cntl)
      radv_emit_acquire_mem(cs, is_mec, gfx_level == GFX9, cp_coher_cntl);

   if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
      if (qf == RADV_QUEUE_GENERAL) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      } else if (qf == RADV_QUEUE_COMPUTE) {
         radeon_set_sh_reg(cs, R_00B828_COMPUTE_PIPELINESTAT_ENABLE, S_00B828_PIPELINESTAT_ENABLE(1));
      }
   } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
      if (qf == RADV_QUEUE_GENERAL) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      } else if (qf == RADV_QUEUE_COMPUTE) {
         radeon_set_sh_reg(cs, R_00B828_COMPUTE_PIPELINESTAT_ENABLE, S_00B828_PIPELINESTAT_ENABLE(0));
      }
   }
}

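/* Emit a COND_EXEC packet: the next `count` dwords in the IB are executed
 * only if the 32-bit predicate at `va` is non-zero, and skipped otherwise.
 * GFX7+ uses the longer encoding with an extra control dword.
 */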
void
radv_emit_cond_exec(const struct radv_device *device, struct radeon_cmdbuf *cs, uint64_t va, uint32_t count)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum amd_gfx_level gfx_level = pdev->info.gfx_level;

   if (gfx_level >= GFX7) {
      radeon_emit(cs, PKT3(PKT3_COND_EXEC, 3, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      radeon_emit(cs, 0);
      radeon_emit(cs, count);
   } else {
      radeon_emit(cs, PKT3(PKT3_COND_EXEC, 2, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      radeon_emit(cs, count);
   }
}

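/* Write a 32-bit immediate to memory at `va` with WRITE_DATA, requesting
 * write confirmation, on the selected CP engine (ME or PFP).
 */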
void
radv_cs_write_data_imm(struct radeon_cmdbuf *cs, unsigned engine_sel, uint64_t va, uint32_t imm)
{
   radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
   radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(engine_sel));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
   radeon_emit(cs, imm);
}