/* xref: /aosp_15_r20/external/mesa3d/src/amd/vulkan/meta/radv_meta_buffer.c
 * (revision 6104692788411f58d303aa86923a9ff6ecaded22)
 */
#include "nir/nir_builder.h"
#include "radv_cp_dma.h"
#include "radv_debug.h"
#include "radv_meta.h"
#include "radv_sdma.h"

#include "radv_cs.h"
#include "sid.h"
#include "vk_common_entrypoints.h"

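/* Builds a compute shader that fills a buffer with a 32-bit value. Each
 * invocation (workgroup size 64) splats the value into a vec4 and stores 16
 * bytes at offset min(global_id * 16, max_offset), so the trailing
 * invocations of a rounded-up dispatch rewrite the last 16 bytes instead of
 * writing out of bounds.
 */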
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *max_offset = nir_channel(&b, pconst, 2);
   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
   nir_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
   nir_build_store_global(&b, data, dst_addr, .align_mul = 4);

   return b.shader;
}

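/* Push constants for the fill shader; the layout must match the
 * nir_load_push_constant offsets in build_buffer_fill_shader.
 */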
struct fill_constants {
   uint64_t addr;
   uint32_t max_offset;
   uint32_t data;
};

static VkResult
create_fill_pipeline(struct radv_device *device)
{
   VkResult result;

   const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct fill_constants),
   };

   result = radv_meta_create_pipeline_layout(device, NULL, 1, &pc_range, &device->meta_state.buffer.fill_p_layout);
   if (result != VK_SUCCESS)
      return result;

   nir_shader *cs = build_buffer_fill_shader(device);

   result = radv_meta_create_compute_pipeline(device, cs, device->meta_state.buffer.fill_p_layout,
                                              &device->meta_state.buffer.fill_pipeline);

   ralloc_free(cs);
   return result;
}

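/* Returns the cached fill pipeline, creating it on first use. The meta-state
 * mutex guards against concurrent creation.
 */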
static VkResult
get_fill_pipeline(struct radv_device *device, VkPipeline *pipeline_out)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result = VK_SUCCESS;

   mtx_lock(&state->mtx);
   if (!state->buffer.fill_pipeline) {
      result = create_fill_pipeline(device);
      if (result != VK_SUCCESS)
         goto fail;
   }

   *pipeline_out = state->buffer.fill_pipeline;

fail:
   mtx_unlock(&state->mtx);
   return result;
}

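/* Builds a compute shader that copies a buffer 16 bytes per invocation. The
 * offset is clamped to max_offset the same way as in the fill shader.
 */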
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *max_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
   nir_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));

   nir_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);

   return b.shader;
}

struct copy_constants {
   uint64_t src_addr;
   uint64_t dst_addr;
   uint32_t max_offset;
};

static VkResult
create_copy_pipeline(struct radv_device *device)
{
   VkResult result;

   const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct copy_constants),
   };

   result = radv_meta_create_pipeline_layout(device, NULL, 1, &pc_range, &device->meta_state.buffer.copy_p_layout);
   if (result != VK_SUCCESS)
      return result;

   nir_shader *cs = build_buffer_copy_shader(device);

   result = radv_meta_create_compute_pipeline(device, cs, device->meta_state.buffer.copy_p_layout,
                                              &device->meta_state.buffer.copy_pipeline);

   ralloc_free(cs);
   return result;
}

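/* Same lazy-creation pattern as get_fill_pipeline, but for the copy pipeline. */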
static VkResult
get_copy_pipeline(struct radv_device *device, VkPipeline *pipeline_out)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result = VK_SUCCESS;

   mtx_lock(&state->mtx);
   if (!state->buffer.copy_pipeline) {
      result = create_copy_pipeline(device);
      if (result != VK_SUCCESS)
         goto fail;
   }

   *pipeline_out = state->buffer.copy_pipeline;

fail:
   mtx_unlock(&state->mtx);
   return result;
}

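/* Creates both buffer meta pipelines up front, unless on_demand is set, in
 * which case they are created lazily on first use.
 */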
VkResult
radv_device_init_meta_buffer_state(struct radv_device *device, bool on_demand)
{
   VkResult result;

   if (on_demand)
      return VK_SUCCESS;

   result = create_fill_pipeline(device);
   if (result != VK_SUCCESS)
      return result;

   result = create_copy_pipeline(device);
   if (result != VK_SUCCESS)
      return result;

   return result;
}

void
radv_device_finish_meta_buffer_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.copy_pipeline, &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.fill_pipeline, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.copy_p_layout, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.fill_p_layout, &state->alloc);
}

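/* Fills a buffer using the compute pipeline: saves the compute state, binds
 * the pipeline, pushes the fill constants and dispatches one invocation per
 * 16 bytes.
 */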
static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t size, uint32_t data)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   struct radv_meta_saved_state saved_state;
   VkPipeline pipeline;
   VkResult result;

   result = get_fill_pipeline(device, &pipeline);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd_buffer->vk, result);
      return;
   }

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct fill_constants fill_consts = {
      .addr = va,
      .max_offset = size - 16,
      .data = data,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.buffer.fill_p_layout,
                              VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(fill_consts), &fill_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

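/* Copies a buffer using the compute pipeline, one 16-byte chunk per
 * invocation.
 */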
static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   struct radv_meta_saved_state saved_state;
   VkPipeline pipeline;
   VkResult result;

   result = get_copy_pipeline(device, &pipeline);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd_buffer->vk, result);
      return;
   }

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct copy_constants copy_consts = {
      .src_addr = src_va,
      .dst_addr = dst_va,
      .max_offset = size - 16,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.buffer.copy_p_layout,
                              VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(copy_consts), &copy_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

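/* Decides between the compute path and CP DMA: compute is preferred for
 * large transfers, except on GFX10+ dedicated-VRAM GPUs when either buffer
 * lives outside VRAM.
 */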
252 static bool
radv_prefer_compute_dma(const struct radv_device * device,uint64_t size,struct radeon_winsys_bo * src_bo,struct radeon_winsys_bo * dst_bo)253 radv_prefer_compute_dma(const struct radv_device *device, uint64_t size, struct radeon_winsys_bo *src_bo,
254                         struct radeon_winsys_bo *dst_bo)
255 {
256    const struct radv_physical_device *pdev = radv_device_physical(device);
257    bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;
258 
259    if (pdev->info.gfx_level >= GFX10 && pdev->info.has_dedicated_vram) {
260       if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
261           (dst_bo && !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM))) {
262          /* Prefer CP DMA for GTT on dGPUS due to slow PCIe. */
263          use_compute = false;
264       }
265    }
266 
267    return use_compute;
268 }
269 
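/* Fills 'size' bytes at 'va' with 'value', picking SDMA, the compute shader
 * or CP DMA depending on the queue family and the heuristic above. Returns
 * the flush bits the caller must apply before the result is visible.
 */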
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image, struct radeon_winsys_bo *bo,
                 uint64_t va, uint64_t size, uint32_t value)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   bool use_compute = radv_prefer_compute_dma(device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(va & 3));
   assert(!(size & 3));

   if (bo)
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) {
      radv_sdma_fill_buffer(device, cmd_buffer->cs, va, size, value);
   } else if (use_compute) {
      fill_buffer_shader(cmd_buffer, va, size, value);

      flush_bits =
         RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
         radv_src_access_flush(cmd_buffer, VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT, VK_ACCESS_2_SHADER_WRITE_BIT, image);
   } else if (size)
      radv_cp_dma_clear_buffer(cmd_buffer, va, size, value);

   return flush_bits;
}

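/* Copies 'size' bytes between two BOs, picking SDMA, the compute shader or
 * CP DMA like radv_fill_buffer. The compute path additionally requires
 * 4-byte aligned offsets and size.
 */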
void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo,
                 uint64_t src_offset, uint64_t dst_offset, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   bool use_compute =
      !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) && radv_prefer_compute_dma(device, size, src_bo, dst_bo);

   uint64_t src_va = radv_buffer_get_va(src_bo) + src_offset;
   uint64_t dst_va = radv_buffer_get_va(dst_bo) + dst_offset;

   radv_cs_add_buffer(device->ws, cmd_buffer->cs, src_bo);
   radv_cs_add_buffer(device->ws, cmd_buffer->cs, dst_bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER)
      radv_sdma_copy_buffer(device, cmd_buffer->cs, src_va, dst_va, size);
   else if (use_compute)
      copy_buffer_shader(cmd_buffer, src_va, dst_va, size);
   else if (size)
      radv_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
}

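/* vkCmdFillBuffer entry point: clamps fillSize to the buffer range (handling
 * VK_WHOLE_SIZE) and rounds it down to a multiple of 4 before filling.
 */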
VKAPI_ATTR void VKAPI_CALL
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize fillSize,
                   uint32_t data)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo,
                    radv_buffer_get_va(dst_buffer->bo) + dst_buffer->offset + dstOffset, fillSize, data);
}

333 static void
copy_buffer(struct radv_cmd_buffer * cmd_buffer,struct radv_buffer * src_buffer,struct radv_buffer * dst_buffer,const VkBufferCopy2 * region)334 copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer, struct radv_buffer *dst_buffer,
335             const VkBufferCopy2 *region)
336 {
337    bool old_predicating;
338 
339    /* VK_EXT_conditional_rendering says that copy commands should not be
340     * affected by conditional rendering.
341     */
342    old_predicating = cmd_buffer->state.predicating;
343    cmd_buffer->state.predicating = false;
344 
345    radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo, src_buffer->offset + region->srcOffset,
346                     dst_buffer->offset + region->dstOffset, region->size);
347 
348    /* Restore conditional rendering. */
349    cmd_buffer->state.predicating = old_predicating;
350 }
351 
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfo)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}

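/* Writes 'size' bytes inline in the command stream with a PKT3_WRITE_DATA
 * packet. Only intended for small updates (below RADV_BUFFER_UPDATE_THRESHOLD)
 * on queues that have a CP.
 */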
void
radv_update_buffer_cp(struct radv_cmd_buffer *cmd_buffer, uint64_t va, const void *data, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   uint64_t words = size / 4;
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);

   assert(size < RADV_BUFFER_UPDATE_THRESHOLD);

   radv_emit_cache_flush(cmd_buffer);
   radeon_check_space(device->ws, cmd_buffer->cs, words + 4);

   radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
   radeon_emit(cmd_buffer->cs,
               S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
   radeon_emit(cmd_buffer->cs, va);
   radeon_emit(cmd_buffer->cs, va >> 32);
   radeon_emit_array(cmd_buffer->cs, data, words);

   if (radv_device_fault_detection_enabled(device))
      radv_cmd_buffer_trace_emit(cmd_buffer);
}

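/* vkCmdUpdateBuffer entry point: small updates go through the inline CP path
 * above; larger updates (and the transfer queue) stage the data in the upload
 * buffer and copy from there.
 */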
VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
                     const void *pData)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD && cmd_buffer->qf != RADV_QUEUE_TRANSFER) {
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, dst_buffer->bo);
      radv_update_buffer_cp(cmd_buffer, va, pData, dataSize);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}