/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_pipe.h"
#include "sid.h"
#include "si_build_pm4.h"

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC        (1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT    (1 << 1)
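/* Set this to clear the destination instead of copying; src_va then holds
 * the 32-bit clear value (see si_emit_cp_dma). */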
#define CP_DMA_CLEAR       (1 << 2)

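/* Whether CP DMA reads/writes should go through the TC L2 cache instead of
 * bypassing it. */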
static bool cp_dma_use_L2(struct si_context *sctx)
{
   return sctx->gfx_level >= GFX7 && !sctx->screen->info.cp_sdma_ge_use_system_memory_scope;
}

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
   unsigned max =
      sctx->gfx_level >= GFX11 ? 32767 :
      sctx->gfx_level >= GFX9 ? S_415_BYTE_COUNT_GFX9(~0u) : S_415_BYTE_COUNT_GFX6(~0u);

   /* make it aligned for optimal performance */
   return max & ~(SI_CPDMA_ALIGNMENT - 1);
}

/* Whether CP DMA must skip holes (uncommitted ranges) in a sparse buffer.
 * This is a GFX9-only workaround. */
static inline bool cp_dma_sparse_wa(struct si_context *sctx, struct si_resource *sdst)
{
   return sctx->gfx_level == GFX9 && sdst->flags & RADEON_FLAG_SPARSE;
}

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, uint64_t dst_va,
                           uint64_t src_va, unsigned size, unsigned flags)
{
   uint32_t header = 0, command = 0;

   assert(sctx->screen->info.has_cp_dma);
   assert(size <= cp_dma_max_byte_count(sctx));

   if (sctx->gfx_level >= GFX9)
      command |= S_415_BYTE_COUNT_GFX9(size);
   else
      command |= S_415_BYTE_COUNT_GFX6(size);

   /* Sync flags. */
   if (flags & CP_DMA_SYNC)
      header |= S_411_CP_SYNC(1);

   if (flags & CP_DMA_RAW_WAIT)
      command |= S_415_RAW_WAIT(1);

   /* Src and dst flags. */
   if (cp_dma_use_L2(sctx))
      header |= S_501_DST_SEL(V_501_DST_ADDR_TC_L2);

   if (flags & CP_DMA_CLEAR) {
      header |= S_411_SRC_SEL(V_411_DATA);
   } else if (cp_dma_use_L2(sctx)) {
      header |= S_501_SRC_SEL(V_501_SRC_ADDR_TC_L2);
   }

   radeon_begin(cs);

   if (sctx->gfx_level >= GFX7) {
      radeon_emit(PKT3(PKT3_DMA_DATA, 5, 0));
      radeon_emit(header);
      radeon_emit(src_va);       /* SRC_ADDR_LO [31:0] */
      radeon_emit(src_va >> 32); /* SRC_ADDR_HI [31:0] */
      radeon_emit(dst_va);       /* DST_ADDR_LO [31:0] */
      radeon_emit(dst_va >> 32); /* DST_ADDR_HI [31:0] */
      radeon_emit(command);
   } else {
      header |= S_411_SRC_ADDR_HI(src_va >> 32);

      radeon_emit(PKT3(PKT3_CP_DMA, 4, 0));
      radeon_emit(src_va);                  /* SRC_ADDR_LO [31:0] */
      radeon_emit(header);                  /* SRC_ADDR_HI [15:0] + flags. */
      radeon_emit(dst_va);                  /* DST_ADDR_LO [31:0] */
      radeon_emit((dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
      radeon_emit(command);
   }
   radeon_end();
}

void si_cp_dma_wait_for_idle(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   /* Issue a dummy DMA that copies zero bytes.
    *
    * The DMA engine will see that there's no work to do and skip this
    * DMA request, however, the CP will see the sync flag and still wait
    * for all DMAs to complete.
    */
   si_emit_cp_dma(sctx, cs, 0, 0, 0, CP_DMA_SYNC);
}

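/* Per-packet setup shared by the clear and copy paths below: reserve CS
 * space, add the buffers to the buffer list, emit the barrier before the
 * first packet, and request CP/DMA synchronization on the last one. */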
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
                              struct pipe_resource *src, unsigned byte_count,
                              uint64_t remaining_size, bool *is_first, unsigned *packet_flags)
{
   si_need_gfx_cs_space(sctx, 0);

   /* This must be done after need_cs_space. */
   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(dst),
                             RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   if (src)
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(src),
                                RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);

   /* Flush the caches for the first copy only.
    * Also wait for the previous CP DMA operations.
    */
   if (*is_first)
      si_emit_barrier_direct(sctx);

   if (*is_first && !(*packet_flags & CP_DMA_CLEAR))
      *packet_flags |= CP_DMA_RAW_WAIT;

   *is_first = false;

   /* Do the synchronization after the last dma, so that all data
    * is written to memory.
    */
   if (byte_count == remaining_size)
      *packet_flags |= CP_DMA_SYNC;
}

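/* Clear a buffer range with a 32-bit value using CP DMA. The size must be
 * a non-zero multiple of 4 bytes. */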
void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
                            struct pipe_resource *dst, uint64_t offset, uint64_t size,
                            unsigned value)
{
   struct si_resource *sdst = si_resource(dst);
   uint64_t va = sdst->gpu_address + offset;
   bool is_first = true;

   assert(!sctx->screen->info.cp_sdma_ge_use_system_memory_scope);
   assert(size && size % 4 == 0);

   if (!cp_dma_use_L2(sctx)) {
      sctx->barrier_flags |= SI_BARRIER_INV_L2;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   /* Mark the buffer range of destination as valid (initialized),
    * so that transfer_map knows it should wait for the GPU when mapping
    * that range. */
   util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = CP_DMA_CLEAR;

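      /* GFX9 sparse buffers: skip over uncommitted holes and clamp byte_count
       * to the next committed range (byte_count may become 0). */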
      if (cp_dma_sparse_wa(sctx, sdst)) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(sdst->buf,
                  va - sdst->gpu_address, &byte_count);
         va += skip_count;
         size -= skip_count;
      }

      if (!byte_count)
         continue;

      si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, &is_first, &dma_flags);

      /* Emit the clear packet. */
      si_emit_cp_dma(sctx, cs, va, value, byte_count, dma_flags);

      size -= byte_count;
      va += byte_count;
   }

   sctx->num_cp_dma_calls++;
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size, bool *is_first)
{
   uint64_t va;
   unsigned dma_flags = 0;
   unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

   assert(size < SI_CPDMA_ALIGNMENT);

   /* Use the scratch buffer as the dummy buffer. The 3D engine should be
    * idle at this point.
    */
   if (!sctx->scratch_buffer || sctx->scratch_buffer->b.b.width0 < scratch_size) {
      si_resource_reference(&sctx->scratch_buffer, NULL);
      sctx->scratch_buffer = si_aligned_buffer_create(&sctx->screen->b,
                                                      PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL |
                                                      SI_RESOURCE_FLAG_DISCARDABLE,
                                                      PIPE_USAGE_DEFAULT, scratch_size, 256);
      if (!sctx->scratch_buffer)
         return;

      si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
   }

   si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b, &sctx->scratch_buffer->b.b, size, size,
                     is_first, &dma_flags);

   va = sctx->scratch_buffer->gpu_address;
   si_emit_cp_dma(sctx, &sctx->gfx_cs, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags);
}

/**
 * Do memcpy between buffers using CP DMA.
 */
void si_cp_dma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
                           struct pipe_resource *src, uint64_t dst_offset, uint64_t src_offset,
                           unsigned size)
{
   assert(size);
   assert(dst && src);

   if (!cp_dma_use_L2(sctx)) {
      sctx->barrier_flags |= SI_BARRIER_INV_L2;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   /* Mark the buffer range of destination as valid (initialized),
    * so that transfer_map knows it should wait for the GPU when mapping
    * that range.
    */
   util_range_add(dst, &si_resource(dst)->valid_buffer_range, dst_offset, dst_offset + size);

   dst_offset += si_resource(dst)->gpu_address;
   src_offset += si_resource(src)->gpu_address;

   unsigned skipped_size = 0;
   unsigned realign_size = 0;

   /* The workarounds aren't needed on Fiji and beyond. */
   if (sctx->family <= CHIP_CARRIZO || sctx->family == CHIP_STONEY) {
      /* If the size is not aligned, we must add a dummy copy at the end
       * just to align the internal counter. Otherwise, the DMA engine
       * would slow down by an order of magnitude for following copies.
       */
      if (size % SI_CPDMA_ALIGNMENT)
         realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

      /* If the copy begins unaligned, we must start copying from the next
       * aligned block and the skipped part should be copied after everything
       * else has been copied. Only the src alignment matters, not dst.
       */
      if (src_offset % SI_CPDMA_ALIGNMENT) {
         skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
         /* The main part will be skipped if the size is too small. */
         skipped_size = MIN2(skipped_size, size);
         size -= skipped_size;
      }
   }

   /* TMZ handling: an encrypted source must be copied within a secure
    * submission, so flush and toggle the IB's secure state if it doesn't
    * match the source. */
   if (unlikely(radeon_uses_secure_bos(sctx->ws))) {
      bool secure = si_resource(src)->flags & RADEON_FLAG_ENCRYPTED;
      assert(!secure || si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED);
      if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                               RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
      }
   }

   /* This is the main part doing the copying. Src is always aligned. */
   uint64_t main_dst_offset = dst_offset + skipped_size;
   uint64_t main_src_offset = src_offset + skipped_size;
   bool is_first = true;

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = 0;

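      /* GFX9 sparse buffers: skip over uncommitted holes in the destination
       * and the source, advancing both offsets in lockstep. */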
      if (cp_dma_sparse_wa(sctx, si_resource(dst))) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(si_resource(dst)->buf,
                  main_dst_offset - si_resource(dst)->gpu_address, &byte_count);
         main_dst_offset += skip_count;
         main_src_offset += skip_count;
         size -= skip_count;
      }

      if (cp_dma_sparse_wa(sctx, si_resource(src))) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(si_resource(src)->buf,
                  main_src_offset - si_resource(src)->gpu_address, &byte_count);
         main_dst_offset += skip_count;
         main_src_offset += skip_count;
         size -= skip_count;
      }

      if (!byte_count)
         continue;

      si_cp_dma_prepare(sctx, dst, src, byte_count, size + skipped_size + realign_size,
                        &is_first, &dma_flags);

      si_emit_cp_dma(sctx, &sctx->gfx_cs, main_dst_offset, main_src_offset, byte_count, dma_flags);

      size -= byte_count;
      main_src_offset += byte_count;
      main_dst_offset += byte_count;
   }

   /* Copy the part we skipped because src wasn't aligned. */
   if (skipped_size) {
      unsigned dma_flags = 0;

      si_cp_dma_prepare(sctx, dst, src, skipped_size, skipped_size + realign_size,
                        &is_first, &dma_flags);

      si_emit_cp_dma(sctx, &sctx->gfx_cs, dst_offset, src_offset, skipped_size, dma_flags);
   }

   /* Finally, realign the engine if the size wasn't aligned. */
   if (realign_size)
      si_cp_dma_realign_engine(sctx, realign_size, &is_first);

   sctx->num_cp_dma_calls++;
}

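/* Write immediate data into a buffer through the CP using PKT3_WRITE_DATA.
 * Both offset and size must be multiples of 4. */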
void si_cp_write_data(struct si_context *sctx, struct si_resource *buf, unsigned offset,
                      unsigned size, unsigned dst_sel, unsigned engine, const void *data)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;

   assert(offset % 4 == 0);
   assert(size % 4 == 0);

   if (sctx->gfx_level == GFX6 && dst_sel == V_370_MEM)
      dst_sel = V_370_MEM_GRBM;

   radeon_add_to_buffer_list(sctx, cs, buf, RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   uint64_t va = buf->gpu_address + offset;

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_WRITE_DATA, 2 + size / 4, 0));
   radeon_emit(S_370_DST_SEL(dst_sel) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(engine));
   radeon_emit(va);
   radeon_emit(va >> 32);
   radeon_emit_array((const uint32_t *)data, size / 4);
   radeon_end();
}

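/* Copy a single 32-bit value between two locations (memory, GPU registers,
 * etc.) through the CP using PKT3_COPY_DATA. */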
void si_cp_copy_data(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned dst_sel,
                     struct si_resource *dst, unsigned dst_offset, unsigned src_sel,
                     struct si_resource *src, unsigned src_offset)
{
   /* cs can point to the compute IB, which has the buffer list in gfx_cs. */
   if (dst) {
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, dst, RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   }
   if (src) {
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, src, RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);
   }

   uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;
   uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_COPY_DATA, 4, 0));
   radeon_emit(COPY_DATA_SRC_SEL(src_sel) | COPY_DATA_DST_SEL(dst_sel) | COPY_DATA_WR_CONFIRM);
   radeon_emit(src_va);
   radeon_emit(src_va >> 32);
   radeon_emit(dst_va);
   radeon_emit(dst_va >> 32);
   radeon_end();
}