xref: /aosp_15_r20/external/mesa3d/src/amd/vulkan/radv_queue.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * SPDX-License-Identifier: MIT
9  */
10 
11 #include "radv_queue.h"
12 #include "radv_buffer.h"
13 #include "radv_cp_reg_shadowing.h"
14 #include "radv_cs.h"
15 #include "radv_debug.h"
16 #include "radv_device_memory.h"
17 #include "radv_image.h"
18 #include "radv_printf.h"
19 #include "radv_rmv.h"
20 #include "vk_semaphore.h"
21 #include "vk_sync.h"
22 
23 #include "ac_cmdbuf.h"
24 #include "ac_debug.h"
25 #include "ac_descriptors.h"
26 
27 enum radeon_ctx_priority
28 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoKHR *pObj)
29 {
30    /* Default to MEDIUM when a specific global priority isn't requested */
31    if (!pObj)
32       return RADEON_CTX_PRIORITY_MEDIUM;
33 
34    switch (pObj->globalPriority) {
35    case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR:
36       return RADEON_CTX_PRIORITY_REALTIME;
37    case VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR:
38       return RADEON_CTX_PRIORITY_HIGH;
39    case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR:
40       return RADEON_CTX_PRIORITY_MEDIUM;
41    case VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR:
42       return RADEON_CTX_PRIORITY_LOW;
43    default:
44       unreachable("Illegal global priority value");
45       return RADEON_CTX_PRIORITY_INVALID;
46    }
47 }
48 
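/* Apply the sparse binds of a VkSparseBufferMemoryBindInfo to the buffer's virtual BO.
 * Consecutive binds that target the same memory object and are contiguous in both the
 * resource and (when backed) the memory are merged into a single radv_bo_virtual_bind() call.
 */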
49 static VkResult
50 radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
51 {
52    VK_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
53    VkResult result = VK_SUCCESS;
54 
55    struct radv_device_memory *mem = NULL;
56    VkDeviceSize resourceOffset = 0;
57    VkDeviceSize size = 0;
58    VkDeviceSize memoryOffset = 0;
59    for (uint32_t i = 0; i < bind->bindCount; ++i) {
60       struct radv_device_memory *cur_mem = NULL;
61 
62       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
63          cur_mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
64       if (i && mem == cur_mem) {
65          if (mem) {
66             if (bind->pBinds[i].resourceOffset == resourceOffset + size &&
67                 bind->pBinds[i].memoryOffset == memoryOffset + size) {
68                size += bind->pBinds[i].size;
69                continue;
70             }
71          } else {
72             if (bind->pBinds[i].resourceOffset == resourceOffset + size) {
73                size += bind->pBinds[i].size;
74                continue;
75             }
76          }
77       }
78       if (size) {
79          result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
80                                        memoryOffset);
81          if (result != VK_SUCCESS)
82             return result;
83       }
84       mem = cur_mem;
85       resourceOffset = bind->pBinds[i].resourceOffset;
86       size = bind->pBinds[i].size;
87       memoryOffset = bind->pBinds[i].memoryOffset;
88    }
89    if (size) {
90       result = radv_bo_virtual_bind(device, &buffer->vk.base, buffer->bo, resourceOffset, size, mem ? mem->bo : NULL,
91                                     memoryOffset);
92    }
93 
94    return result;
95 }
96 
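/* Apply the opaque sparse binds of an image against its virtual backing BO,
 * one radv_bo_virtual_bind() call per bind.
 */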
97 static VkResult
98 radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind)
99 {
100    VK_FROM_HANDLE(radv_image, image, bind->image);
101    VkResult result;
102 
103    for (uint32_t i = 0; i < bind->bindCount; ++i) {
104       struct radv_device_memory *mem = NULL;
105 
106       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
107          mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
108 
109       result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bind->pBinds[i].resourceOffset,
110                                     bind->pBinds[i].size, mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
111       if (result != VK_SUCCESS)
112          return result;
113    }
114 
115    return VK_SUCCESS;
116 }
117 
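/* Apply per-subresource sparse image binds. The bind region is converted from texels to
 * blocks, the byte offset of the mip level/layer is computed (PRT layout on GFX9+, legacy
 * level layout otherwise), and the region is bound either in one call when it covers whole
 * rows/slices of the subresource, or one tile row at a time otherwise.
 */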
118 static VkResult
119 radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
120 {
121    VK_FROM_HANDLE(radv_image, image, bind->image);
122    const struct radv_physical_device *pdev = radv_device_physical(device);
123    struct radeon_surf *surface = &image->planes[0].surface;
124    uint32_t bs = vk_format_get_blocksize(image->vk.format);
125    VkResult result;
126 
127    for (uint32_t i = 0; i < bind->bindCount; ++i) {
128       struct radv_device_memory *mem = NULL;
129       uint64_t offset, depth_pitch;
130       uint32_t pitch;
131       uint64_t mem_offset = bind->pBinds[i].memoryOffset;
132       const uint32_t layer = bind->pBinds[i].subresource.arrayLayer;
133       const uint32_t level = bind->pBinds[i].subresource.mipLevel;
134 
135       VkExtent3D bind_extent = bind->pBinds[i].extent;
136       bind_extent.width = DIV_ROUND_UP(bind_extent.width, vk_format_get_blockwidth(image->vk.format));
137       bind_extent.height = DIV_ROUND_UP(bind_extent.height, vk_format_get_blockheight(image->vk.format));
138 
139       VkOffset3D bind_offset = bind->pBinds[i].offset;
140       bind_offset.x /= vk_format_get_blockwidth(image->vk.format);
141       bind_offset.y /= vk_format_get_blockheight(image->vk.format);
142 
143       if (bind->pBinds[i].memory != VK_NULL_HANDLE)
144          mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
145 
146       if (pdev->info.gfx_level >= GFX9) {
147          offset = surface->u.gfx9.surf_slice_size * layer + surface->u.gfx9.prt_level_offset[level];
148          pitch = surface->u.gfx9.prt_level_pitch[level];
149          depth_pitch = surface->u.gfx9.surf_slice_size;
150       } else {
151          depth_pitch = surface->u.legacy.level[level].slice_size_dw * 4;
152          offset = (uint64_t)surface->u.legacy.level[level].offset_256B * 256 + depth_pitch * layer;
153          pitch = surface->u.legacy.level[level].nblk_x;
154       }
155 
156       offset +=
157          bind_offset.z * depth_pitch + ((uint64_t)bind_offset.y * pitch * surface->prt_tile_depth +
158                                         (uint64_t)bind_offset.x * surface->prt_tile_height * surface->prt_tile_depth) *
159                                           bs;
160 
161       uint32_t aligned_extent_width = ALIGN(bind_extent.width, surface->prt_tile_width);
162       uint32_t aligned_extent_height = ALIGN(bind_extent.height, surface->prt_tile_height);
163       uint32_t aligned_extent_depth = ALIGN(bind_extent.depth, surface->prt_tile_depth);
164 
165       bool whole_subres = (bind_extent.height <= surface->prt_tile_height || aligned_extent_width == pitch) &&
166                           (bind_extent.depth <= surface->prt_tile_depth ||
167                            (uint64_t)aligned_extent_width * aligned_extent_height * bs == depth_pitch);
168 
169       if (whole_subres) {
170          uint64_t size = (uint64_t)aligned_extent_width * aligned_extent_height * aligned_extent_depth * bs;
171          result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, offset, size,
172                                        mem ? mem->bo : NULL, mem_offset);
173          if (result != VK_SUCCESS)
174             return result;
175       } else {
176          uint32_t img_y_increment = pitch * bs * surface->prt_tile_depth;
177          uint32_t mem_y_increment = aligned_extent_width * bs * surface->prt_tile_depth;
178          uint64_t mem_z_increment = (uint64_t)aligned_extent_width * aligned_extent_height * bs;
179          uint64_t size = mem_y_increment * surface->prt_tile_height;
180          for (unsigned z = 0; z < bind_extent.depth;
181               z += surface->prt_tile_depth, offset += depth_pitch * surface->prt_tile_depth) {
182             for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
183                uint64_t bo_offset = offset + (uint64_t)img_y_increment * y;
184 
185                result = radv_bo_virtual_bind(device, &image->vk.base, image->bindings[0].bo, bo_offset, size,
186                                              mem ? mem->bo : NULL,
187                                              mem_offset + (uint64_t)mem_y_increment * y + mem_z_increment * z);
188                if (result != VK_SUCCESS)
189                   return result;
190             }
191          }
192       }
193    }
194 
195    return VK_SUCCESS;
196 }
197 
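/* Apply all sparse buffer, opaque image and image binds of a queue submission. */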
198 static VkResult
199 radv_queue_submit_bind_sparse_memory(struct radv_device *device, struct vk_queue_submit *submission)
200 {
201    for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
202       VkResult result = radv_sparse_buffer_bind_memory(device, submission->buffer_binds + i);
203       if (result != VK_SUCCESS)
204          return result;
205    }
206 
207    for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
208       VkResult result = radv_sparse_image_opaque_bind_memory(device, submission->image_opaque_binds + i);
209       if (result != VK_SUCCESS)
210          return result;
211    }
212 
213    for (uint32_t i = 0; i < submission->image_bind_count; ++i) {
214       VkResult result = radv_sparse_image_bind_memory(device, submission->image_binds + i);
215       if (result != VK_SUCCESS)
216          return result;
217    }
218 
219    return VK_SUCCESS;
220 }
221 
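/* Submit only the wait and signal operations of a submission, without any command buffers. */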
222 static VkResult
223 radv_queue_submit_empty(struct radv_queue *queue, struct vk_queue_submit *submission)
224 {
225    struct radv_device *device = radv_queue_device(queue);
226    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
227    struct radv_winsys_submit_info submit = {
228       .ip_type = radv_queue_ring(queue),
229       .queue_index = queue->vk.index_in_family,
230    };
231 
232    return device->ws->cs_submit(ctx, &submit, submission->wait_count, submission->waits, submission->signal_count,
233                                 submission->signals);
234 }
235 
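/* Fill a 4-dword buffer descriptor for one shader ring at the given offset inside the ring BO,
 * using ac_build_buffer_descriptor() with the swizzle/stride parameters the ring requires.
 */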
236 static void
237 radv_set_ring_buffer(const struct radv_physical_device *pdev, struct radeon_winsys_bo *bo, uint32_t offset,
238                      uint32_t ring_size, bool add_tid, bool swizzle_enable, bool oob_select_raw, uint32_t element_size,
239                      uint32_t index_stride, uint32_t desc[4])
240 {
241    const uint8_t oob_select = oob_select_raw ? V_008F0C_OOB_SELECT_RAW : V_008F0C_OOB_SELECT_DISABLED;
242    const uint64_t va = radv_buffer_get_va(bo) + offset;
243    const struct ac_buffer_state ac_state = {
244       .va = va,
245       .size = ring_size,
246       .format = PIPE_FORMAT_R32_FLOAT,
247       .swizzle =
248          {
249             PIPE_SWIZZLE_X,
250             PIPE_SWIZZLE_Y,
251             PIPE_SWIZZLE_Z,
252             PIPE_SWIZZLE_W,
253          },
254       .swizzle_enable = swizzle_enable,
255       .element_size = element_size,
256       .index_stride = index_stride,
257       .add_tid = add_tid,
258       .gfx10_oob_select = oob_select,
259    };
260 
261    ac_build_buffer_descriptor(pdev->info.gfx_level, &ac_state, desc);
262 }
263 
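/* Write the shader ring descriptors into the descriptor BO that the preambles point the
 * per-stage shader pointers at. Judging from the increments below, the dword layout is
 * roughly: scratch (0), ESGS (4), GSVS (12), tess factor/off-chip (20), task draw/payload (28),
 * mesh scratch (36), attribute ring (40), followed by the 1x/2x/4x/8x sample position tables.
 */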
264 static void
265 radv_fill_shader_rings(struct radv_device *device, uint32_t *desc, struct radeon_winsys_bo *scratch_bo,
266                        uint32_t esgs_ring_size, struct radeon_winsys_bo *esgs_ring_bo, uint32_t gsvs_ring_size,
267                        struct radeon_winsys_bo *gsvs_ring_bo, struct radeon_winsys_bo *tess_rings_bo,
268                        struct radeon_winsys_bo *task_rings_bo, struct radeon_winsys_bo *mesh_scratch_ring_bo,
269                        uint32_t attr_ring_size, struct radeon_winsys_bo *attr_ring_bo)
270 {
271    const struct radv_physical_device *pdev = radv_device_physical(device);
272 
273    if (scratch_bo) {
274       uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
275       uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
276 
277       if (pdev->info.gfx_level >= GFX11)
278          rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
279       else
280          rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
281 
282       desc[0] = scratch_va;
283       desc[1] = rsrc1;
284    }
285 
286    desc += 4;
287 
288    if (esgs_ring_bo) {
289       /* stride 0, num records - size, add tid, swizzle, elsize4,
290          index stride 64 */
291       radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, true, true, false, 1, 3, &desc[0]);
292 
293       /* GS entry for ES->GS ring */
294       /* stride 0, num records - size, elsize0,
295          index stride 0 */
296       radv_set_ring_buffer(pdev, esgs_ring_bo, 0, esgs_ring_size, false, false, false, 0, 0, &desc[4]);
297    }
298 
299    desc += 8;
300 
301    if (gsvs_ring_bo) {
302       /* VS entry for GS->VS ring */
303       /* stride 0, num records - size, elsize0,
304          index stride 0 */
305       radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, gsvs_ring_size, false, false, false, 0, 0, &desc[0]);
306 
307       /* stride gsvs_itemsize, num records 64
308          elsize 4, index stride 16 */
309       /* shader will patch stride and desc[2] */
310       radv_set_ring_buffer(pdev, gsvs_ring_bo, 0, 0, true, true, false, 1, 1, &desc[4]);
311    }
312 
313    desc += 8;
314 
315    if (tess_rings_bo) {
316       radv_set_ring_buffer(pdev, tess_rings_bo, 0, pdev->hs.tess_factor_ring_size, false, false, true, 0, 0, &desc[0]);
317 
318       radv_set_ring_buffer(pdev, tess_rings_bo, pdev->hs.tess_offchip_ring_offset, pdev->hs.tess_offchip_ring_size,
319                            false, false, true, 0, 0, &desc[4]);
320    }
321 
322    desc += 8;
323 
324    if (task_rings_bo) {
325       radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.draw_ring_offset,
326                            pdev->task_info.num_entries * AC_TASK_DRAW_ENTRY_BYTES, false, false, false, 0, 0, &desc[0]);
327 
328       radv_set_ring_buffer(pdev, task_rings_bo, pdev->task_info.payload_ring_offset,
329                            pdev->task_info.num_entries * AC_TASK_PAYLOAD_ENTRY_BYTES, false, false, false, 0, 0,
330                            &desc[4]);
331    }
332 
333    desc += 8;
334 
335    if (mesh_scratch_ring_bo) {
336       radv_set_ring_buffer(pdev, mesh_scratch_ring_bo, 0, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES,
337                            false, false, false, 0, 0, &desc[0]);
338    }
339 
340    desc += 4;
341 
342    if (attr_ring_bo) {
343       assert(pdev->info.gfx_level >= GFX11);
344 
345       ac_build_attr_ring_descriptor(pdev->info.gfx_level, radv_buffer_get_va(attr_ring_bo), attr_ring_size, 0,
346                                     &desc[0]);
347    }
348 
349    desc += 4;
350 
351    /* add sample positions after all rings */
352    memcpy(desc, device->sample_locations_1x, 8);
353    desc += 2;
354    memcpy(desc, device->sample_locations_2x, 16);
355    desc += 4;
356    memcpy(desc, device->sample_locations_4x, 32);
357    desc += 8;
358    memcpy(desc, device->sample_locations_8x, 64);
359 }
360 
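/* Program the ESGS/GSVS ring sizes (in 256-byte units) and make both ring BOs resident in the
 * command stream; uses uconfig registers on GFX7+ and config registers on GFX6.
 */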
361 static void
362 radv_emit_gs_ring_sizes(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *esgs_ring_bo,
363                         uint32_t esgs_ring_size, struct radeon_winsys_bo *gsvs_ring_bo, uint32_t gsvs_ring_size)
364 {
365    const struct radv_physical_device *pdev = radv_device_physical(device);
366 
367    if (!esgs_ring_bo && !gsvs_ring_bo)
368       return;
369 
370    if (esgs_ring_bo)
371       radv_cs_add_buffer(device->ws, cs, esgs_ring_bo);
372 
373    if (gsvs_ring_bo)
374       radv_cs_add_buffer(device->ws, cs, gsvs_ring_bo);
375 
376    if (pdev->info.gfx_level >= GFX7) {
377       radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
378       radeon_emit(cs, esgs_ring_size >> 8);
379       radeon_emit(cs, gsvs_ring_size >> 8);
380    } else {
381       radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
382       radeon_emit(cs, esgs_ring_size >> 8);
383       radeon_emit(cs, gsvs_ring_size >> 8);
384    }
385 }
386 
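/* Program the tessellation factor ring base/size and the HS off-chip parameters.
 * On GFX11+ the ring size register is per shader engine, so it is divided by max_se.
 */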
387 static void
388 radv_emit_tess_factor_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *tess_rings_bo)
389 {
390    const struct radv_physical_device *pdev = radv_device_physical(device);
391    uint64_t tf_va;
392    uint32_t tf_ring_size;
393    if (!tess_rings_bo)
394       return;
395 
396    tf_ring_size = pdev->hs.tess_factor_ring_size / 4;
397    tf_va = radv_buffer_get_va(tess_rings_bo);
398 
399    radv_cs_add_buffer(device->ws, cs, tess_rings_bo);
400 
401    if (pdev->info.gfx_level >= GFX7) {
402       if (pdev->info.gfx_level >= GFX11) {
403          /* TF_RING_SIZE is per SE on GFX11. */
404          tf_ring_size /= pdev->info.max_se;
405       }
406 
407       radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE, S_030938_SIZE(tf_ring_size));
408       radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE, tf_va >> 8);
409 
410       if (pdev->info.gfx_level >= GFX12) {
411          radeon_set_uconfig_reg(cs, R_03099C_VGT_TF_MEMORY_BASE_HI, S_030984_BASE_HI(tf_va >> 40));
412       } else if (pdev->info.gfx_level >= GFX10) {
413          radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI, S_030984_BASE_HI(tf_va >> 40));
414       } else if (pdev->info.gfx_level == GFX9) {
415          radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(tf_va >> 40));
416       }
417 
418       radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
419    } else {
420       radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE, S_008988_SIZE(tf_ring_size));
421       radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE, tf_va >> 8);
422       radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM, pdev->hs.hs_offchip_param);
423    }
424 }
425 
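/* CPU-map the task rings BO and initialize the task control buffer: the write/read/dealloc
 * ring pointers start at num_entries, followed by the entry count and the 64-bit draw ring
 * address.
 */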
426 static VkResult
427 radv_initialise_task_control_buffer(struct radv_device *device, struct radeon_winsys_bo *task_rings_bo)
428 {
429    const struct radv_physical_device *pdev = radv_device_physical(device);
430    uint32_t *ptr = (uint32_t *)radv_buffer_map(device->ws, task_rings_bo);
431    if (!ptr)
432       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
433 
434    const uint32_t num_entries = pdev->task_info.num_entries;
435    const uint64_t task_va = radv_buffer_get_va(task_rings_bo);
436    const uint64_t task_draw_ring_va = task_va + pdev->task_info.draw_ring_offset;
437    assert((task_draw_ring_va & 0xFFFFFF00) == (task_draw_ring_va & 0xFFFFFFFF));
438 
439    /* 64-bit write_ptr */
440    ptr[0] = num_entries;
441    ptr[1] = 0;
442    /* 64-bit read_ptr */
443    ptr[2] = num_entries;
444    ptr[3] = 0;
445    /* 64-bit dealloc_ptr */
446    ptr[4] = num_entries;
447    ptr[5] = 0;
448    /* num_entries */
449    ptr[6] = num_entries;
450    /* 64-bit draw ring address */
451    ptr[7] = task_draw_ring_va;
452    ptr[8] = task_draw_ring_va >> 32;
453 
454    device->ws->buffer_unmap(device->ws, task_rings_bo, false);
455    return VK_SUCCESS;
456 }
457 
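/* Point the GPU at the task control buffer with DISPATCH_TASK_STATE_INIT; the SHADER_TYPE bit
 * is set when the packet is emitted on the compute queue.
 */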
458 static void
459 radv_emit_task_rings(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *task_rings_bo,
460                      bool compute)
461 {
462    if (!task_rings_bo)
463       return;
464 
465    const uint64_t task_ctrlbuf_va = radv_buffer_get_va(task_rings_bo);
466    assert(util_is_aligned(task_ctrlbuf_va, 256));
467    radv_cs_add_buffer(device->ws, cs, task_rings_bo);
468 
469    /* Tell the GPU where the task control buffer is. */
470    radeon_emit(cs, PKT3(PKT3_DISPATCH_TASK_STATE_INIT, 1, 0) | PKT3_SHADER_TYPE_S(!!compute));
471    /* bits [31:8]: control buffer address lo, bits[7:0]: reserved (set to zero) */
472    radeon_emit(cs, task_ctrlbuf_va & 0xFFFFFF00);
473    /* bits [31:0]: control buffer address hi */
474    radeon_emit(cs, task_ctrlbuf_va >> 32);
475 }
476 
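/* Program the graphics scratch (temp ring) state: SPI_TMPRING_SIZE, plus the scratch base
 * address registers on GFX11+, where WAVES is counted per shader engine.
 */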
477 static void
478 radv_emit_graphics_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
479                            struct radeon_winsys_bo *scratch_bo)
480 {
481    const struct radv_physical_device *pdev = radv_device_physical(device);
482    const struct radeon_info *gpu_info = &pdev->info;
483 
484    if (!scratch_bo)
485       return;
486 
487    radv_cs_add_buffer(device->ws, cs, scratch_bo);
488 
489    if (gpu_info->gfx_level >= GFX11) {
490       uint64_t va = radv_buffer_get_va(scratch_bo);
491 
492       /* WAVES is per SE for SPI_TMPRING_SIZE. */
493       waves /= gpu_info->max_se;
494 
495       radeon_set_context_reg_seq(cs, R_0286E8_SPI_TMPRING_SIZE, 3);
496       radeon_emit(cs, S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 256)));
497       radeon_emit(cs, va >> 8);  /* SPI_GFX_SCRATCH_BASE_LO */
498       radeon_emit(cs, va >> 40); /* SPI_GFX_SCRATCH_BASE_HI */
499    } else {
500       radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
501                              S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(DIV_ROUND_UP(size_per_wave, 1024)));
502    }
503 }
504 
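/* Program the compute scratch state: the scratch buffer descriptor words in
 * COMPUTE_USER_DATA_0/1, COMPUTE_TMPRING_SIZE, and on GFX11+ also the dispatch scratch base.
 */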
505 static void
506 radv_emit_compute_scratch(struct radv_device *device, struct radeon_cmdbuf *cs, uint32_t size_per_wave, uint32_t waves,
507                           struct radeon_winsys_bo *compute_scratch_bo)
508 {
509    const struct radv_physical_device *pdev = radv_device_physical(device);
510    const struct radeon_info *gpu_info = &pdev->info;
511    uint64_t scratch_va;
512    uint32_t rsrc1;
513 
514    if (!compute_scratch_bo)
515       return;
516 
517    scratch_va = radv_buffer_get_va(compute_scratch_bo);
518    rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
519 
520    if (gpu_info->gfx_level >= GFX11)
521       rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
522    else
523       rsrc1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
524 
525    radv_cs_add_buffer(device->ws, cs, compute_scratch_bo);
526 
527    if (gpu_info->gfx_level >= GFX11) {
528       radeon_set_sh_reg_seq(cs, R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO, 2);
529       radeon_emit(cs, scratch_va >> 8);
530       radeon_emit(cs, scratch_va >> 40);
531 
532       waves /= gpu_info->max_se;
533    }
534 
535    radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
536    radeon_emit(cs, scratch_va);
537    radeon_emit(cs, rsrc1);
538 
539    radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
540                      S_00B860_WAVES(waves) |
541                         S_00B860_WAVESIZE(DIV_ROUND_UP(size_per_wave, gpu_info->gfx_level >= GFX11 ? 256 : 1024)));
542 }
543 
544 static void
545 radv_emit_compute_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
546                                   struct radeon_winsys_bo *descriptor_bo)
547 {
548    if (!descriptor_bo)
549       return;
550 
551    uint64_t va = radv_buffer_get_va(descriptor_bo);
552    radv_cs_add_buffer(device->ws, cs, descriptor_bo);
553 
554    /* Compute shader user data 0-1 have the scratch pointer (unlike GFX shaders),
555     * so emit the descriptor pointer to user data 2-3 instead (task_ring_offsets arg).
556     */
557    radv_emit_shader_pointer(device, cs, R_00B908_COMPUTE_USER_DATA_2, va, true);
558 }
559 
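/* Write the ring descriptor BO address to the per-stage shader pointer registers; the
 * register set differs per hardware generation.
 */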
560 static void
561 radv_emit_graphics_shader_pointers(struct radv_device *device, struct radeon_cmdbuf *cs,
562                                    struct radeon_winsys_bo *descriptor_bo)
563 {
564    const struct radv_physical_device *pdev = radv_device_physical(device);
565    uint64_t va;
566 
567    if (!descriptor_bo)
568       return;
569 
570    va = radv_buffer_get_va(descriptor_bo);
571 
572    radv_cs_add_buffer(device->ws, cs, descriptor_bo);
573 
574    if (pdev->info.gfx_level >= GFX12) {
575       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B410_SPI_SHADER_PGM_LO_HS,
576                          R_00B210_SPI_SHADER_PGM_LO_GS};
577 
578       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
579          radv_emit_shader_pointer(device, cs, regs[i], va, true);
580       }
581    } else if (pdev->info.gfx_level >= GFX11) {
582       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B420_SPI_SHADER_PGM_LO_HS,
583                          R_00B220_SPI_SHADER_PGM_LO_GS};
584 
585       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
586          radv_emit_shader_pointer(device, cs, regs[i], va, true);
587       }
588    } else if (pdev->info.gfx_level >= GFX10) {
589       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
590                          R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
591 
592       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
593          radv_emit_shader_pointer(device, cs, regs[i], va, true);
594       }
595    } else if (pdev->info.gfx_level == GFX9) {
596       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
597                          R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
598 
599       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
600          radv_emit_shader_pointer(device, cs, regs[i], va, true);
601       }
602    } else {
603       uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
604                          R_00B230_SPI_SHADER_USER_DATA_GS_0, R_00B330_SPI_SHADER_USER_DATA_ES_0,
605                          R_00B430_SPI_SHADER_USER_DATA_HS_0, R_00B530_SPI_SHADER_USER_DATA_LS_0};
606 
607       for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
608          radv_emit_shader_pointer(device, cs, regs[i], va, true);
609       }
610    }
611 }
612 
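/* GFX11+: wait for idle with a PWS RELEASE_MEM/ACQUIRE_MEM pair, then program the attribute
 * ring base/size (and on GFX12 the position and primitive ring registers as well).
 */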
613 static void
614 radv_emit_attribute_ring(struct radv_device *device, struct radeon_cmdbuf *cs, struct radeon_winsys_bo *attr_ring_bo,
615                          uint32_t attr_ring_size)
616 {
617    const struct radv_physical_device *pdev = radv_device_physical(device);
618    uint64_t va;
619 
620    if (!attr_ring_bo)
621       return;
622 
623    assert(pdev->info.gfx_level >= GFX11);
624 
625    va = radv_buffer_get_va(attr_ring_bo);
626    assert((va >> 32) == pdev->info.address32_hi);
627 
628    radv_cs_add_buffer(device->ws, cs, attr_ring_bo);
629 
630    /* We must wait for idle using an EOP event before changing the attribute ring registers. Use the
631     * bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
632     */
633    radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
634    radeon_emit(cs, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | S_490_EVENT_INDEX(5) | S_490_PWS_ENABLE(1));
635    radeon_emit(cs, 0); /* DST_SEL, INT_SEL, DATA_SEL */
636    radeon_emit(cs, 0); /* ADDRESS_LO */
637    radeon_emit(cs, 0); /* ADDRESS_HI */
638    radeon_emit(cs, 0); /* DATA_LO */
639    radeon_emit(cs, 0); /* DATA_HI */
640    radeon_emit(cs, 0); /* INT_CTXID */
641 
642    /* Wait for the PWS counter. */
643    radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
644    radeon_emit(cs, S_580_PWS_STAGE_SEL(V_580_CP_ME) | S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) | S_580_PWS_ENA2(1) |
645                       S_580_PWS_COUNT(0));
646    radeon_emit(cs, 0xffffffff); /* GCR_SIZE */
647    radeon_emit(cs, 0x01ffffff); /* GCR_SIZE_HI */
648    radeon_emit(cs, 0);          /* GCR_BASE_LO */
649    radeon_emit(cs, 0);          /* GCR_BASE_HI */
650    radeon_emit(cs, S_585_PWS_ENA(1));
651    radeon_emit(cs, 0); /* GCR_CNTL */
652 
653    /* The PS will read inputs from this address. */
654    radeon_set_uconfig_reg_seq(cs, R_031110_SPI_GS_THROTTLE_CNTL1, 4);
655    radeon_emit(cs, 0x12355123); /* SPI_GS_THROTTLE_CNTL1 */
656    radeon_emit(cs, 0x1544D);    /* SPI_GS_THROTTLE_CNTL2 */
657    radeon_emit(cs, va >> 16);   /* SPI_ATTRIBUTE_RING_BASE */
658    radeon_emit(cs, S_03111C_MEM_SIZE(((attr_ring_size / pdev->info.max_se) >> 16) - 1) |
659                       S_03111C_BIG_PAGE(pdev->info.discardable_allows_big_page) |
660                       S_03111C_L1_POLICY(1)); /* SPI_ATTRIBUTE_RING_SIZE */
661 
662    if (pdev->info.gfx_level >= GFX12) {
663       const uint64_t pos_address = va + pdev->info.pos_ring_offset;
664       const uint64_t prim_address = va + pdev->info.prim_ring_offset;
665 
666       /* When one of these 4 registers is updated, all 4 must be updated. */
667       radeon_set_uconfig_reg_seq(cs, R_0309A0_GE_POS_RING_BASE, 4);
668       radeon_emit(cs, pos_address >> 16);                                          /* R_0309A0_GE_POS_RING_BASE */
669       radeon_emit(cs, S_0309A4_MEM_SIZE(pdev->info.pos_ring_size_per_se >> 5));    /* R_0309A4_GE_POS_RING_SIZE */
670       radeon_emit(cs, prim_address >> 16);                                         /* R_0309A8_GE_PRIM_RING_BASE */
671       radeon_emit(cs, S_0309AC_MEM_SIZE(pdev->info.prim_ring_size_per_se >> 5) | S_0309AC_SCOPE(gfx12_scope_device) |
672                          S_0309AC_PAF_TEMPORAL(gfx12_store_high_temporal_stay_dirty) |
673                          S_0309AC_PAB_TEMPORAL(gfx12_load_last_use_discard) |
674                          S_0309AC_SPEC_DATA_READ(gfx12_spec_read_auto) | S_0309AC_FORCE_SE_SCOPE(1) |
675                          S_0309AC_PAB_NOFILL(1)); /* R_0309AC_GE_PRIM_RING_SIZE */
676    }
677 }
678 
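/* Emit the compute preamble: the common state built by ac_init_compute_preamble_state(), the
 * dispatch start coordinates, and the TBA/TMA registers when a trap handler is installed.
 */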
679 static void
680 radv_emit_compute(struct radv_device *device, struct radeon_cmdbuf *cs, bool is_compute_queue)
681 {
682    const struct radv_physical_device *pdev = radv_device_physical(device);
683    const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
684 
685    struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 64, is_compute_queue);
686    if (!pm4)
687       return;
688 
689    const struct ac_preamble_state preamble_state = {
690       .border_color_va = border_color_va,
691       .gfx11 =
692          {
693             .compute_dispatch_interleave = 64,
694          },
695    };
696 
697    ac_init_compute_preamble_state(&preamble_state, pm4);
698 
699    ac_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
700    ac_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
701    ac_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
702 
703    if (device->tma_bo) {
704       uint64_t tba_va, tma_va;
705 
706       assert(pdev->info.gfx_level == GFX8);
707 
708       tba_va = radv_shader_get_va(device->trap_handler_shader);
709       tma_va = radv_buffer_get_va(device->tma_bo);
710 
711       ac_pm4_set_reg(pm4, R_00B838_COMPUTE_TBA_LO, tba_va >> 8);
712       ac_pm4_set_reg(pm4, R_00B83C_COMPUTE_TBA_HI, tba_va >> 40);
713       ac_pm4_set_reg(pm4, R_00B840_COMPUTE_TMA_LO, tma_va >> 8);
714       ac_pm4_set_reg(pm4, R_00B844_COMPUTE_TMA_HI, tma_va >> 40);
715    }
716 
717    ac_pm4_finalize(pm4);
718 
719    radeon_emit_array(cs, pm4->pm4, pm4->ndw);
720 
721    ac_pm4_free_state(pm4);
722 }
723 
724 /* 12.4 fixed-point */
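/* For example, 1.0 packs to 16 (0x0010) and values at or above 4096.0 clamp to 0xffff. */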
725 static unsigned
726 radv_pack_float_12p4(float x)
727 {
728    return x <= 0 ? 0 : x >= 4096 ? 0xffff : x * 16;
729 }
730 
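/* Emit the graphics preamble: context control (and CLEAR_STATE when available), the common
 * graphics preamble from ac_init_graphics_preamble_state(), RADV's static register defaults,
 * and finally the compute preamble as well.
 */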
731 void
732 radv_emit_graphics(struct radv_device *device, struct radeon_cmdbuf *cs)
733 {
734    struct radv_physical_device *pdev = radv_device_physical(device);
735    const uint64_t border_color_va = device->border_color_data.bo ? radv_buffer_get_va(device->border_color_data.bo) : 0;
736    bool has_clear_state = pdev->info.has_clear_state;
737    int i;
738 
739    if (!device->uses_shadow_regs) {
740       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
741       radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
742       radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
743 
744       if (has_clear_state) {
745          radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
746          radeon_emit(cs, 0);
747       }
748    }
749 
750    struct ac_pm4_state *pm4 = ac_pm4_create_sized(&pdev->info, false, 512, false);
751    if (!pm4)
752       return;
753 
754    const struct ac_preamble_state preamble_state = {
755       .border_color_va = border_color_va,
756    };
757 
758    ac_init_graphics_preamble_state(&preamble_state, pm4);
759 
760    if (!has_clear_state) {
761       for (i = 0; i < 16; i++) {
762          radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i * 8, 0);
763          radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i * 8, fui(1.0));
764       }
765    }
766 
767    if (!has_clear_state) {
768       radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
769       /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
770       radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
771    }
772 
773    if (pdev->info.gfx_level <= GFX8)
774       radeon_set_sh_reg(cs, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(pdev->info.address32_hi >> 8));
775 
776    if (pdev->info.gfx_level < GFX11)
777       radeon_set_sh_reg(cs, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(pdev->info.address32_hi >> 8));
778 
779    unsigned cu_mask_ps = pdev->info.gfx_level >= GFX10_3 ? ac_gfx103_get_cu_mask_ps(&pdev->info) : ~0u;
780 
781    if (pdev->info.gfx_level >= GFX12) {
782       radeon_set_sh_reg(cs, R_00B420_SPI_SHADER_PGM_RSRC4_HS,
783                         S_00B420_WAVE_LIMIT(0x3ff) | S_00B420_GLG_FORCE_DISABLE(1));
784       radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC4_PS,
785                         S_00B01C_WAVE_LIMIT_GFX12(0x3FF) | S_00B01C_LDS_GROUP_SIZE_GFX12(1));
786    } else if (pdev->info.gfx_level >= GFX11) {
787       radeon_set_sh_reg_idx(&pdev->info, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS, 3,
788                             ac_apply_cu_en(S_00B404_CU_EN(0xffff), C_00B404_CU_EN, 16, &pdev->info));
789       radeon_set_sh_reg_idx(&pdev->info, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS, 3,
790                             ac_apply_cu_en(S_00B004_CU_EN(cu_mask_ps >> 16), C_00B004_CU_EN, 16, &pdev->info));
791    }
792 
793    if (pdev->info.gfx_level >= GFX10) {
794       /* Vulkan doesn't support user edge flags and it also doesn't
795        * need to prevent drawing lines on internal edges of
796        * decomposed primitives (such as quads) with polygon mode = lines.
797        */
798       unsigned vertex_reuse_depth = pdev->info.gfx_level >= GFX10_3 ? 30 : 0;
799       radeon_set_context_reg(cs, R_028838_PA_CL_NGG_CNTL,
800                              S_028838_INDEX_BUF_EDGE_FLAG_ENA(0) | S_028838_VERTEX_REUSE_DEPTH(vertex_reuse_depth));
801 
802       if (pdev->info.gfx_level >= GFX10_3) {
803          /* This allows sample shading. */
804          radeon_set_context_reg(cs, R_028848_PA_CL_VRS_CNTL,
805                                 S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_SC_VRS_COMB_MODE_OVERRIDE));
806       }
807    }
808 
809    if (pdev->info.gfx_level >= GFX8) {
810       /* GFX8+ only compares the bits according to the index type by default,
811        * so we can always leave the programmed value at the maximum.
812        */
813       radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0xffffffff);
814    }
815 
816    if (pdev->info.gfx_level >= GFX12) {
817       radeon_set_context_reg(cs, R_028C54_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL, S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
818    }
819 
820    unsigned tmp = (unsigned)(1.0 * 8.0);
821    radeon_set_context_reg(cs, R_028A00_PA_SU_POINT_SIZE, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
822    radeon_set_context_reg(
823       cs, R_028A04_PA_SU_POINT_MINMAX,
824       S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) | S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875 / 2)));
825 
826    /* Enable the Polaris small primitive filter control.
827     * XXX: There is possibly an issue when MSAA is off (see RadeonSI
828     * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
829     * and AMDVLK doesn't have a workaround either.
830     */
831    if (pdev->info.family >= CHIP_POLARIS10) {
832       unsigned small_prim_filter_cntl = S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
833                                         /* Workaround for a hw line bug. */
834                                         S_028830_LINE_FILTER_DISABLE(pdev->info.family <= CHIP_POLARIS12);
835 
836       radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL, small_prim_filter_cntl);
837    }
838 
839    if (pdev->info.gfx_level >= GFX12) {
840       radeon_set_context_reg(cs, R_028644_SPI_INTERP_CONTROL_0,
841                              S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
842                                 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
843                                 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
844                                 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
845                                 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
846                                 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
847    } else {
848       radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
849                              S_0286D4_FLAT_SHADE_ENA(1) | S_0286D4_PNT_SPRITE_ENA(1) |
850                                 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
851                                 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
852                                 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
853                                 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
854                                 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
855    }
856 
857    radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
858                           S_028BE4_PIX_CENTER(1) | S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
859                              S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
860 
861    if (pdev->info.gfx_level >= GFX12) {
862       radeon_set_context_reg(cs, R_028814_PA_CL_VTE_CNTL,
863                              S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
864                                 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
865                                 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
866    } else {
867       radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
868                              S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
869                                 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
870                                 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
871    }
872 
873    if (device->tma_bo) {
874       uint64_t tba_va, tma_va;
875 
876       assert(pdev->info.gfx_level == GFX8);
877 
878       tba_va = radv_shader_get_va(device->trap_handler_shader);
879       tma_va = radv_buffer_get_va(device->tma_bo);
880 
881       uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS, R_00B100_SPI_SHADER_TBA_LO_VS, R_00B200_SPI_SHADER_TBA_LO_GS,
882                          R_00B300_SPI_SHADER_TBA_LO_ES, R_00B400_SPI_SHADER_TBA_LO_HS, R_00B500_SPI_SHADER_TBA_LO_LS};
883 
884       for (i = 0; i < ARRAY_SIZE(regs); ++i) {
885          radeon_set_sh_reg_seq(cs, regs[i], 4);
886          radeon_emit(cs, tba_va >> 8);
887          radeon_emit(cs, tba_va >> 40);
888          radeon_emit(cs, tma_va >> 8);
889          radeon_emit(cs, tma_va >> 40);
890       }
891    }
892 
893    radeon_set_context_reg(cs, R_028828_PA_SU_LINE_STIPPLE_SCALE, 0x3f800000);
894 
895    if (pdev->info.gfx_level >= GFX12) {
896       radeon_set_context_reg(cs, R_028000_DB_RENDER_CONTROL, 0);
897    }
898 
899    ac_pm4_finalize(pm4);
900    radeon_emit_array(cs, pm4->pm4, pm4->ndw);
901    ac_pm4_free_state(pm4);
902 
903    radv_emit_compute(device, cs, false);
904 }
905 
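/* Execute the pre-built gfx_init IB when the device has one, otherwise emit the full graphics
 * preamble directly into the command stream.
 */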
906 static void
907 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_device *device)
908 {
909    if (device->gfx_init) {
910       struct radeon_winsys *ws = device->ws;
911 
912       ws->cs_execute_ib(cs, device->gfx_init, 0, device->gfx_init_size_dw & 0xffff, false);
913 
914       radv_cs_add_buffer(device->ws, cs, device->gfx_init);
915    } else {
916       radv_emit_graphics(device, cs);
917    }
918 }
919 
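/* Grow the scratch and shader ring BOs to satisfy the requested ring sizes, refill the ring
 * descriptor BO if any ring changed, and rebuild the queue's three preamble command streams
 * (initial full-flush, initial and continue). On failure, newly created BOs are destroyed and
 * the previous state is kept.
 */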
920 static VkResult
921 radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *device,
922                         const struct radv_queue_ring_info *needs)
923 {
924    const struct radv_physical_device *pdev = radv_device_physical(device);
925    struct radeon_winsys *ws = device->ws;
926    struct radeon_winsys_bo *scratch_bo = queue->scratch_bo;
927    struct radeon_winsys_bo *descriptor_bo = queue->descriptor_bo;
928    struct radeon_winsys_bo *compute_scratch_bo = queue->compute_scratch_bo;
929    struct radeon_winsys_bo *esgs_ring_bo = queue->esgs_ring_bo;
930    struct radeon_winsys_bo *gsvs_ring_bo = queue->gsvs_ring_bo;
931    struct radeon_winsys_bo *tess_rings_bo = queue->tess_rings_bo;
932    struct radeon_winsys_bo *task_rings_bo = queue->task_rings_bo;
933    struct radeon_winsys_bo *mesh_scratch_ring_bo = queue->mesh_scratch_ring_bo;
934    struct radeon_winsys_bo *attr_ring_bo = queue->attr_ring_bo;
935    struct radeon_winsys_bo *gds_bo = queue->gds_bo;
936    struct radeon_winsys_bo *gds_oa_bo = queue->gds_oa_bo;
937    struct radeon_cmdbuf *dest_cs[3] = {0};
938    const uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
939    VkResult result = VK_SUCCESS;
940 
941    const bool add_sample_positions = !queue->ring_info.sample_positions && needs->sample_positions;
942    const uint32_t scratch_size = needs->scratch_size_per_wave * needs->scratch_waves;
943    const uint32_t queue_scratch_size = queue->ring_info.scratch_size_per_wave * queue->ring_info.scratch_waves;
944 
945    if (scratch_size > queue_scratch_size) {
946       result = radv_bo_create(device, NULL, scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
947                               RADV_BO_PRIORITY_SCRATCH, 0, true, &scratch_bo);
948       if (result != VK_SUCCESS)
949          goto fail;
950       radv_rmv_log_command_buffer_bo_create(device, scratch_bo, 0, 0, scratch_size);
951    }
952 
953    const uint32_t compute_scratch_size = needs->compute_scratch_size_per_wave * needs->compute_scratch_waves;
954    const uint32_t compute_queue_scratch_size =
955       queue->ring_info.compute_scratch_size_per_wave * queue->ring_info.compute_scratch_waves;
956    if (compute_scratch_size > compute_queue_scratch_size) {
957       result = radv_bo_create(device, NULL, compute_scratch_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
958                               RADV_BO_PRIORITY_SCRATCH, 0, true, &compute_scratch_bo);
959       if (result != VK_SUCCESS)
960          goto fail;
961       radv_rmv_log_command_buffer_bo_create(device, compute_scratch_bo, 0, 0, compute_scratch_size);
962    }
963 
964    if (needs->esgs_ring_size > queue->ring_info.esgs_ring_size) {
965       result = radv_bo_create(device, NULL, needs->esgs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
966                               RADV_BO_PRIORITY_SCRATCH, 0, true, &esgs_ring_bo);
967       if (result != VK_SUCCESS)
968          goto fail;
969       radv_rmv_log_command_buffer_bo_create(device, esgs_ring_bo, 0, 0, needs->esgs_ring_size);
970    }
971 
972    if (needs->gsvs_ring_size > queue->ring_info.gsvs_ring_size) {
973       result = radv_bo_create(device, NULL, needs->gsvs_ring_size, 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
974                               RADV_BO_PRIORITY_SCRATCH, 0, true, &gsvs_ring_bo);
975       if (result != VK_SUCCESS)
976          goto fail;
977       radv_rmv_log_command_buffer_bo_create(device, gsvs_ring_bo, 0, 0, needs->gsvs_ring_size);
978    }
979 
980    if (!queue->ring_info.tess_rings && needs->tess_rings) {
981       uint64_t tess_rings_size = pdev->hs.tess_offchip_ring_offset + pdev->hs.tess_offchip_ring_size;
982       result = radv_bo_create(device, NULL, tess_rings_size, 256, RADEON_DOMAIN_VRAM, ring_bo_flags,
983                               RADV_BO_PRIORITY_SCRATCH, 0, true, &tess_rings_bo);
984       if (result != VK_SUCCESS)
985          goto fail;
986       radv_rmv_log_command_buffer_bo_create(device, tess_rings_bo, 0, 0, tess_rings_size);
987    }
988 
989    if (!queue->ring_info.task_rings && needs->task_rings) {
990       assert(pdev->info.gfx_level >= GFX10_3);
991 
992       /* We write the control buffer from the CPU, so we need to grant CPU access to the BO.
993        * The draw ring needs to be zero-initialized, otherwise the ready bits will be incorrect.
994        */
995       uint32_t task_rings_bo_flags =
996          RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM;
997 
998       result = radv_bo_create(device, NULL, pdev->task_info.bo_size_bytes, 256, RADEON_DOMAIN_VRAM, task_rings_bo_flags,
999                               RADV_BO_PRIORITY_SCRATCH, 0, true, &task_rings_bo);
1000       if (result != VK_SUCCESS)
1001          goto fail;
1002       radv_rmv_log_command_buffer_bo_create(device, task_rings_bo, 0, 0, pdev->task_info.bo_size_bytes);
1003 
1004       result = radv_initialise_task_control_buffer(device, task_rings_bo);
1005       if (result != VK_SUCCESS)
1006          goto fail;
1007    }
1008 
1009    if (!queue->ring_info.mesh_scratch_ring && needs->mesh_scratch_ring) {
1010       assert(pdev->info.gfx_level >= GFX10_3);
1011       result =
1012          radv_bo_create(device, NULL, RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES, 256,
1013                         RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true, &mesh_scratch_ring_bo);
1014 
1015       if (result != VK_SUCCESS)
1016          goto fail;
1017       radv_rmv_log_command_buffer_bo_create(device, mesh_scratch_ring_bo, 0, 0,
1018                                             RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES);
1019    }
1020 
1021    if (needs->attr_ring_size > queue->ring_info.attr_ring_size) {
1022       assert(pdev->info.gfx_level >= GFX11);
1023       result = radv_bo_create(device, NULL, needs->attr_ring_size, 2 * 1024 * 1024 /* 2MiB */, RADEON_DOMAIN_VRAM,
1024                               RADEON_FLAG_32BIT | RADEON_FLAG_DISCARDABLE | ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0,
1025                               true, &attr_ring_bo);
1026       if (result != VK_SUCCESS)
1027          goto fail;
1028       radv_rmv_log_command_buffer_bo_create(device, attr_ring_bo, 0, 0, needs->attr_ring_size);
1029    }
1030 
1031    if (!queue->ring_info.gds && needs->gds) {
1032       assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1033 
1034       /* 4 streamout GDS counters.
1035        * We need 256B (64 dw) of GDS, otherwise streamout hangs.
1036        */
1037       result = radv_bo_create(device, NULL, 256, 4, RADEON_DOMAIN_GDS, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1038                               &gds_bo);
1039       if (result != VK_SUCCESS)
1040          goto fail;
1041 
1042       /* Add the GDS BO to our global BO list to prevent the kernel from emitting a GDS switch and
1043        * resetting the state when a compute queue is used.
1044        */
1045       result = device->ws->buffer_make_resident(ws, gds_bo, true);
1046       if (result != VK_SUCCESS)
1047          goto fail;
1048    }
1049 
1050    if (!queue->ring_info.gds_oa && needs->gds_oa) {
1051       assert(pdev->info.gfx_level >= GFX10 && pdev->info.gfx_level < GFX12);
1052 
1053       result = radv_bo_create(device, NULL, 1, 1, RADEON_DOMAIN_OA, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, true,
1054                               &gds_oa_bo);
1055       if (result != VK_SUCCESS)
1056          goto fail;
1057 
1058       /* Add the GDS OA BO to our global BO list to prevent the kernel from emitting a GDS switch
1059        * and resetting the state when a compute queue is used.
1060        */
1061       result = device->ws->buffer_make_resident(ws, gds_oa_bo, true);
1062       if (result != VK_SUCCESS)
1063          goto fail;
1064    }
1065 
1066    /* Re-initialize the descriptor BO when any ring BOs changed.
1067     *
1068     * Additionally, make sure to create the descriptor BO for the compute queue
1069     * when it uses the task shader rings. The task rings BO is shared between the
1070     * GFX and compute queues and already initialized here.
1071     */
1072    if ((queue->qf == RADV_QUEUE_COMPUTE && !descriptor_bo && task_rings_bo) || scratch_bo != queue->scratch_bo ||
1073        esgs_ring_bo != queue->esgs_ring_bo || gsvs_ring_bo != queue->gsvs_ring_bo ||
1074        tess_rings_bo != queue->tess_rings_bo || task_rings_bo != queue->task_rings_bo ||
1075        mesh_scratch_ring_bo != queue->mesh_scratch_ring_bo || attr_ring_bo != queue->attr_ring_bo ||
1076        add_sample_positions) {
1077       const uint32_t size = 304;
1078 
1079       result = radv_bo_create(device, NULL, size, 4096, RADEON_DOMAIN_VRAM,
1080                               RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
1081                               RADV_BO_PRIORITY_DESCRIPTOR, 0, true, &descriptor_bo);
1082       if (result != VK_SUCCESS)
1083          goto fail;
1084    }
1085 
1086    if (descriptor_bo != queue->descriptor_bo) {
1087       uint32_t *map = (uint32_t *)radv_buffer_map(ws, descriptor_bo);
1088       if (!map) {
1089          result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1090          goto fail;
1091       }
1092 
1093       radv_fill_shader_rings(device, map, scratch_bo, needs->esgs_ring_size, esgs_ring_bo, needs->gsvs_ring_size,
1094                              gsvs_ring_bo, tess_rings_bo, task_rings_bo, mesh_scratch_ring_bo, needs->attr_ring_size,
1095                              attr_ring_bo);
1096 
1097       ws->buffer_unmap(ws, descriptor_bo, false);
1098    }
1099 
1100    for (int i = 0; i < 3; ++i) {
1101       enum rgp_flush_bits sqtt_flush_bits = 0;
1102       struct radeon_cmdbuf *cs = NULL;
1103       cs = ws->cs_create(ws, radv_queue_family_to_ring(pdev, queue->qf), false);
1104       if (!cs) {
1105          result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1106          goto fail;
1107       }
1108 
1109       radeon_check_space(ws, cs, 512);
1110       dest_cs[i] = cs;
1111 
1112       if (scratch_bo)
1113          radv_cs_add_buffer(ws, cs, scratch_bo);
1114 
1115       /* Emit initial configuration. */
1116       switch (queue->qf) {
1117       case RADV_QUEUE_GENERAL:
1118          if (queue->uses_shadow_regs)
1119             radv_emit_shadow_regs_preamble(cs, device, queue);
1120          radv_init_graphics_state(cs, device);
1121 
1122          if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || task_rings_bo) {
1123             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1124             radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1125 
1126             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1127             radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1128          }
1129 
1130          radv_emit_gs_ring_sizes(device, cs, esgs_ring_bo, needs->esgs_ring_size, gsvs_ring_bo, needs->gsvs_ring_size);
1131          radv_emit_tess_factor_ring(device, cs, tess_rings_bo);
1132          radv_emit_task_rings(device, cs, task_rings_bo, false);
1133          radv_emit_attribute_ring(device, cs, attr_ring_bo, needs->attr_ring_size);
1134          radv_emit_graphics_shader_pointers(device, cs, descriptor_bo);
1135          radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1136                                    compute_scratch_bo);
1137          radv_emit_graphics_scratch(device, cs, needs->scratch_size_per_wave, needs->scratch_waves, scratch_bo);
1138          break;
1139       case RADV_QUEUE_COMPUTE:
1140          radv_emit_compute(device, cs, true);
1141 
1142          if (task_rings_bo) {
1143             radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1144             radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1145          }
1146 
1147          radv_emit_task_rings(device, cs, task_rings_bo, true);
1148          radv_emit_compute_shader_pointers(device, cs, descriptor_bo);
1149          radv_emit_compute_scratch(device, cs, needs->compute_scratch_size_per_wave, needs->compute_scratch_waves,
1150                                    compute_scratch_bo);
1151          break;
1152       default:
1153          break;
1154       }
1155 
1156       if (i < 2) {
1157          /* The two initial preambles have a cache flush at the beginning. */
1158          const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
1159          enum radv_cmd_flush_bits flush_bits = RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE |
1160                                                RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2 |
1161                                                RADV_CMD_FLAG_START_PIPELINE_STATS;
1162 
1163          if (i == 0) {
1164             /* The full flush preamble should also wait for previous shader work to finish. */
1165             flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1166             if (queue->qf == RADV_QUEUE_GENERAL)
1167                flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1168          }
1169 
1170          radv_cs_emit_cache_flush(ws, cs, gfx_level, NULL, 0, queue->qf, flush_bits, &sqtt_flush_bits, 0);
1171       }
1172 
1173       result = ws->cs_finalize(cs);
1174       if (result != VK_SUCCESS)
1175          goto fail;
1176    }
1177 
1178    if (queue->initial_full_flush_preamble_cs)
1179       ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1180 
1181    if (queue->initial_preamble_cs)
1182       ws->cs_destroy(queue->initial_preamble_cs);
1183 
1184    if (queue->continue_preamble_cs)
1185       ws->cs_destroy(queue->continue_preamble_cs);
1186 
1187    queue->initial_full_flush_preamble_cs = dest_cs[0];
1188    queue->initial_preamble_cs = dest_cs[1];
1189    queue->continue_preamble_cs = dest_cs[2];
1190 
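        /* When a new BO was allocated above, release the queue's old one and keep the new allocation. */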
1191    if (scratch_bo != queue->scratch_bo) {
1192       if (queue->scratch_bo) {
1193          radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1194          radv_bo_destroy(device, NULL, queue->scratch_bo);
1195       }
1196       queue->scratch_bo = scratch_bo;
1197    }
1198 
1199    if (compute_scratch_bo != queue->compute_scratch_bo) {
1200       if (queue->compute_scratch_bo) {
1201          radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
1202          radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
1203       }
1204       queue->compute_scratch_bo = compute_scratch_bo;
1205    }
1206 
1207    if (esgs_ring_bo != queue->esgs_ring_bo) {
1208       if (queue->esgs_ring_bo) {
1209          radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1210          radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
1211       }
1212       queue->esgs_ring_bo = esgs_ring_bo;
1213    }
1214 
1215    if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1216       if (queue->gsvs_ring_bo) {
1217          radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1218          radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
1219       }
1220       queue->gsvs_ring_bo = gsvs_ring_bo;
1221    }
1222 
1223    if (descriptor_bo != queue->descriptor_bo) {
1224       if (queue->descriptor_bo)
1225          radv_bo_destroy(device, NULL, queue->descriptor_bo);
1226       queue->descriptor_bo = descriptor_bo;
1227    }
1228 
1229    queue->tess_rings_bo = tess_rings_bo;
1230    queue->task_rings_bo = task_rings_bo;
1231    queue->mesh_scratch_ring_bo = mesh_scratch_ring_bo;
1232    queue->attr_ring_bo = attr_ring_bo;
1233    queue->gds_bo = gds_bo;
1234    queue->gds_oa_bo = gds_oa_bo;
1235    queue->ring_info = *needs;
1236    return VK_SUCCESS;
1237 fail:
1238    for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1239       if (dest_cs[i])
1240          ws->cs_destroy(dest_cs[i]);
1241    if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1242       radv_bo_destroy(device, NULL, descriptor_bo);
1243    if (scratch_bo && scratch_bo != queue->scratch_bo)
1244       radv_bo_destroy(device, NULL, scratch_bo);
1245    if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1246       radv_bo_destroy(device, NULL, compute_scratch_bo);
1247    if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1248       radv_bo_destroy(device, NULL, esgs_ring_bo);
1249    if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1250       radv_bo_destroy(device, NULL, gsvs_ring_bo);
1251    if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
1252       radv_bo_destroy(device, NULL, tess_rings_bo);
1253    if (task_rings_bo && task_rings_bo != queue->task_rings_bo)
1254       radv_bo_destroy(device, NULL, task_rings_bo);
1255    if (attr_ring_bo && attr_ring_bo != queue->attr_ring_bo)
1256       radv_bo_destroy(device, NULL, attr_ring_bo);
1257    if (gds_bo && gds_bo != queue->gds_bo) {
1258       ws->buffer_make_resident(ws, queue->gds_bo, false);
1259       radv_bo_destroy(device, NULL, gds_bo);
1260    }
1261    if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo) {
1262       ws->buffer_make_resident(ws, queue->gds_oa_bo, false);
1263       radv_bo_destroy(device, NULL, gds_oa_bo);
1264    }
1265 
1266    return vk_error(queue, result);
1267 }
1268 
1269 static VkResult
1270 radv_update_preambles(struct radv_queue_state *queue, struct radv_device *device,
1271                       struct vk_command_buffer *const *cmd_buffers, uint32_t cmd_buffer_count, bool *use_perf_counters,
1272                       bool *has_follower)
1273 {
1274    const struct radv_physical_device *pdev = radv_device_physical(device);
1275    bool has_indirect_pipeline_binds = false;
1276 
1277    if (queue->qf != RADV_QUEUE_GENERAL && queue->qf != RADV_QUEUE_COMPUTE) {
1278       for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1279          struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1280 
1281          *has_follower |= !!cmd_buffer->gang.cs;
1282       }
1283 
1284       return VK_SUCCESS;
1285    }
1286 
1287    /* Figure out the needs of the current submission.
1288     * Start by copying the queue's current info.
1289     * This is done because we only allow two possible behaviours for these buffers:
1290     * - Grow when the newly needed amount is larger than what we had
1291     * - Allocate the max size and reuse it, but don't free it until the queue is destroyed
1292     */
1293    struct radv_queue_ring_info needs = queue->ring_info;
1294    *use_perf_counters = false;
1295    *has_follower = false;
1296 
1297    for (uint32_t j = 0; j < cmd_buffer_count; j++) {
1298       struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
1299 
1300       needs.scratch_size_per_wave = MAX2(needs.scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
1301       needs.scratch_waves = MAX2(needs.scratch_waves, cmd_buffer->scratch_waves_wanted);
1302       needs.compute_scratch_size_per_wave =
1303          MAX2(needs.compute_scratch_size_per_wave, cmd_buffer->compute_scratch_size_per_wave_needed);
1304       needs.compute_scratch_waves = MAX2(needs.compute_scratch_waves, cmd_buffer->compute_scratch_waves_wanted);
1305       needs.esgs_ring_size = MAX2(needs.esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1306       needs.gsvs_ring_size = MAX2(needs.gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1307       needs.tess_rings |= cmd_buffer->tess_rings_needed;
1308       needs.task_rings |= cmd_buffer->task_rings_needed;
1309       needs.mesh_scratch_ring |= cmd_buffer->mesh_scratch_ring_needed;
1310       needs.gds |= cmd_buffer->gds_needed;
1311       needs.gds_oa |= cmd_buffer->gds_oa_needed;
1312       needs.sample_positions |= cmd_buffer->sample_positions_needed;
1313       *use_perf_counters |= cmd_buffer->state.uses_perf_counters;
1314       *has_follower |= !!cmd_buffer->gang.cs;
1315 
1316       has_indirect_pipeline_binds |= cmd_buffer->has_indirect_pipeline_binds;
1317    }
1318 
1319    if (has_indirect_pipeline_binds) {
1320       /* Use the maximum possible scratch size for indirect compute pipelines with DGC. */
1321       simple_mtx_lock(&device->compute_scratch_mtx);
1322       needs.compute_scratch_size_per_wave = MAX2(needs.compute_scratch_size_per_wave, device->compute_scratch_size_per_wave);
1323       needs.compute_scratch_waves = MAX2(needs.compute_scratch_waves, device->compute_scratch_waves);
1324       simple_mtx_unlock(&device->compute_scratch_mtx);
1325    }
1326 
1327    /* Sanitize scratch size information. */
1328    needs.scratch_waves =
1329       needs.scratch_size_per_wave ? MIN2(needs.scratch_waves, UINT32_MAX / needs.scratch_size_per_wave) : 0;
1330    needs.compute_scratch_waves =
1331       needs.compute_scratch_size_per_wave
1332          ? MIN2(needs.compute_scratch_waves, UINT32_MAX / needs.compute_scratch_size_per_wave)
1333          : 0;
1334 
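        /* GFX11+ always needs the full-size attribute ring on the GFX queue. */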
1335    if (pdev->info.gfx_level >= GFX11 && queue->qf == RADV_QUEUE_GENERAL) {
1336       needs.attr_ring_size = pdev->info.total_attribute_pos_prim_ring_size;
1337    }
1338 
1339    /* Return early if we already match these needs.
1340     * Note that it's not possible for any of the needed values to be less
1341     * than what the queue already had, because we only ever increase the allocated size.
1342     */
1343    if (queue->initial_full_flush_preamble_cs && queue->ring_info.scratch_size_per_wave == needs.scratch_size_per_wave &&
1344        queue->ring_info.scratch_waves == needs.scratch_waves &&
1345        queue->ring_info.compute_scratch_size_per_wave == needs.compute_scratch_size_per_wave &&
1346        queue->ring_info.compute_scratch_waves == needs.compute_scratch_waves &&
1347        queue->ring_info.esgs_ring_size == needs.esgs_ring_size &&
1348        queue->ring_info.gsvs_ring_size == needs.gsvs_ring_size && queue->ring_info.tess_rings == needs.tess_rings &&
1349        queue->ring_info.task_rings == needs.task_rings &&
1350        queue->ring_info.mesh_scratch_ring == needs.mesh_scratch_ring &&
1351        queue->ring_info.attr_ring_size == needs.attr_ring_size && queue->ring_info.gds == needs.gds &&
1352        queue->ring_info.gds_oa == needs.gds_oa && queue->ring_info.sample_positions == needs.sample_positions)
1353       return VK_SUCCESS;
1354 
1355    return radv_update_preamble_cs(queue, device, &needs);
1356 }
1357 
1358 static VkResult
1359 radv_create_gang_wait_preambles_postambles(struct radv_queue *queue)
1360 {
1361    struct radv_device *device = radv_queue_device(queue);
1362    const struct radv_physical_device *pdev = radv_device_physical(device);
1363 
1364    if (queue->gang_sem_bo)
1365       return VK_SUCCESS;
1366 
1367    VkResult r = VK_SUCCESS;
1368    struct radeon_winsys *ws = device->ws;
1369    const enum amd_ip_type leader_ip = radv_queue_family_to_ring(pdev, queue->state.qf);
1370    struct radeon_winsys_bo *gang_sem_bo = NULL;
1371 
1372    /* Gang semaphores BO.
1373     * DWORD 0: used in preambles, gang leader writes, gang members wait.
1374     * DWORD 1: used in postambles, gang leader waits, gang members write.
1375     */
1376    r = radv_bo_create(device, NULL, 8, 4, RADEON_DOMAIN_VRAM,
1377                       RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM, RADV_BO_PRIORITY_SCRATCH, 0, true,
1378                       &gang_sem_bo);
1379    if (r != VK_SUCCESS)
1380       return r;
1381 
1382    struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip, false);
1383    struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip, false);
1384    struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1385    struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
1386 
1387    if (!leader_pre_cs || !leader_post_cs || !ace_pre_cs || !ace_post_cs) {
1388       r = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1389       goto fail;
1390    }
1391 
1392    radeon_check_space(ws, leader_pre_cs, 256);
1393    radeon_check_space(ws, leader_post_cs, 256);
1394    radeon_check_space(ws, ace_pre_cs, 256);
1395    radeon_check_space(ws, ace_post_cs, 256);
1396 
1397    radv_cs_add_buffer(ws, leader_pre_cs, gang_sem_bo);
1398    radv_cs_add_buffer(ws, leader_post_cs, gang_sem_bo);
1399    radv_cs_add_buffer(ws, ace_pre_cs, gang_sem_bo);
1400    radv_cs_add_buffer(ws, ace_post_cs, gang_sem_bo);
1401 
1402    const uint64_t ace_wait_va = radv_buffer_get_va(gang_sem_bo);
1403    const uint64_t leader_wait_va = ace_wait_va + 4;
1404    const uint32_t zero = 0;
1405    const uint32_t one = 1;
1406 
1407    /* Preambles for gang submission.
1408     * Make gang members wait until the gang leader starts.
1409     * Userspace is required to emit this wait to make sure it behaves correctly
1410     * in a multi-process environment, because task shader dispatches are not
1411     * meant to be executed on multiple compute engines at the same time.
1412     */
1413    radv_cp_wait_mem(ace_pre_cs, RADV_QUEUE_COMPUTE, WAIT_REG_MEM_GREATER_OR_EQUAL, ace_wait_va, 1, 0xffffffff);
1414    radv_cs_write_data(device, ace_pre_cs, RADV_QUEUE_COMPUTE, V_370_ME, ace_wait_va, 1, &zero, false);
1415    radv_cs_write_data(device, leader_pre_cs, queue->state.qf, V_370_ME, ace_wait_va, 1, &one, false);
1416 
1417    /* Create postambles for gang submission.
1418     * These make the gang leader wait for the whole gang to finish.
1419     * This is necessary because the kernel signals the userspace fence
1420     * as soon as the gang leader is done; without this wait, the same
1421     * command buffers could be resubmitted while gang members are still executing.
1422     */
1423    radv_cp_wait_mem(leader_post_cs, queue->state.qf, WAIT_REG_MEM_GREATER_OR_EQUAL, leader_wait_va, 1, 0xffffffff);
1424    radv_cs_write_data(device, leader_post_cs, queue->state.qf, V_370_ME, leader_wait_va, 1, &zero, false);
1425    radv_cs_emit_write_event_eop(ace_post_cs, pdev->info.gfx_level, RADV_QUEUE_COMPUTE, V_028A90_BOTTOM_OF_PIPE_TS, 0,
1426                                 EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, leader_wait_va, 1, 0);
1427 
1428    r = ws->cs_finalize(leader_pre_cs);
1429    if (r != VK_SUCCESS)
1430       goto fail;
1431    r = ws->cs_finalize(leader_post_cs);
1432    if (r != VK_SUCCESS)
1433       goto fail;
1434    r = ws->cs_finalize(ace_pre_cs);
1435    if (r != VK_SUCCESS)
1436       goto fail;
1437    r = ws->cs_finalize(ace_post_cs);
1438    if (r != VK_SUCCESS)
1439       goto fail;
1440 
1441    queue->gang_sem_bo = gang_sem_bo;
1442    queue->state.gang_wait_preamble_cs = leader_pre_cs;
1443    queue->state.gang_wait_postamble_cs = leader_post_cs;
1444    queue->follower_state->gang_wait_preamble_cs = ace_pre_cs;
1445    queue->follower_state->gang_wait_postamble_cs = ace_post_cs;
1446 
1447    return VK_SUCCESS;
1448 
1449 fail:
1450    if (leader_pre_cs)
1451       ws->cs_destroy(leader_pre_cs);
1452    if (leader_post_cs)
1453       ws->cs_destroy(leader_post_cs);
1454    if (ace_pre_cs)
1455       ws->cs_destroy(ace_pre_cs);
1456    if (ace_post_cs)
1457       ws->cs_destroy(ace_post_cs);
1458    if (gang_sem_bo)
1459       radv_bo_destroy(device, &queue->vk.base, gang_sem_bo);
1460 
1461    return r;
1462 }
1463 
1464 static bool
1465 radv_queue_init_follower_state(struct radv_queue *queue)
1466 {
1467    if (queue->follower_state)
1468       return true;
1469 
1470    queue->follower_state = calloc(1, sizeof(struct radv_queue_state));
1471    if (!queue->follower_state)
1472       return false;
1473 
1474    queue->follower_state->qf = RADV_QUEUE_COMPUTE;
1475    return true;
1476 }
1477 
1478 static VkResult
1479 radv_update_gang_preambles(struct radv_queue *queue)
1480 {
1481    struct radv_device *device = radv_queue_device(queue);
1482 
1483    if (!radv_queue_init_follower_state(queue))
1484       return VK_ERROR_OUT_OF_HOST_MEMORY;
1485 
1486    VkResult r = VK_SUCCESS;
1487 
1488    /* Copy task rings state.
1489     * Task shaders that are submitted on the ACE queue need to share
1490     * their ring buffers with the mesh shaders on the GFX queue.
1491     */
1492    queue->follower_state->ring_info.task_rings = queue->state.ring_info.task_rings;
1493    queue->follower_state->task_rings_bo = queue->state.task_rings_bo;
1494 
1495    /* Copy some needed states from the parent queue state.
1496     * These can only increase so it's okay to copy them as-is without checking.
1497     * Note, task shaders use the scratch size from their graphics pipeline.
1498     */
1499    struct radv_queue_ring_info needs = queue->follower_state->ring_info;
1500    needs.compute_scratch_size_per_wave = queue->state.ring_info.scratch_size_per_wave;
1501    needs.compute_scratch_waves = queue->state.ring_info.scratch_waves;
1502    needs.task_rings = queue->state.ring_info.task_rings;
1503 
1504    r = radv_update_preamble_cs(queue->follower_state, device, &needs);
1505    if (r != VK_SUCCESS)
1506       return r;
1507 
1508    r = radv_create_gang_wait_preambles_postambles(queue);
1509    if (r != VK_SUCCESS)
1510       return r;
1511 
1512    return VK_SUCCESS;
1513 }
1514 
1515 static struct radeon_cmdbuf *
1516 radv_create_perf_counter_lock_cs(struct radv_device *device, unsigned pass, bool unlock)
1517 {
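        /* Two cached command streams per pass: even index locks, odd index unlocks. */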
1518    struct radeon_cmdbuf **cs_ref = &device->perf_counter_lock_cs[pass * 2 + (unlock ? 1 : 0)];
1519    struct radeon_cmdbuf *cs;
1520 
1521    if (*cs_ref)
1522       return *cs_ref;
1523 
1524    cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
1525    if (!cs)
1526       return NULL;
1527 
1528    ASSERTED unsigned cdw = radeon_check_space(device->ws, cs, 21);
1529 
1530    radv_cs_add_buffer(device->ws, cs, device->perf_counter_bo);
1531 
1532    if (!unlock) {
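           /* Acquire the perf counter mutex: loop on an atomic compare-and-swap that expects 0 and writes 1. */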
1533       uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1534       radeon_emit(cs, PKT3(PKT3_ATOMIC_MEM, 7, 0));
1535       radeon_emit(cs, ATOMIC_OP(TC_OP_ATOMIC_CMPSWAP_32) | ATOMIC_COMMAND(ATOMIC_COMMAND_LOOP));
1536       radeon_emit(cs, mutex_va);       /* addr lo */
1537       radeon_emit(cs, mutex_va >> 32); /* addr hi */
1538       radeon_emit(cs, 1);              /* data lo */
1539       radeon_emit(cs, 0);              /* data hi */
1540       radeon_emit(cs, 0);              /* compare data lo */
1541       radeon_emit(cs, 0);              /* compare data hi */
1542       radeon_emit(cs, 10);             /* loop interval */
1543    }
1544 
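        /* Update the per-pass flags: write 0 to the slot being cleared and 1 to the slot being set. */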
1545    uint64_t va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_PASS_OFFSET;
1546    uint64_t unset_va = va + (unlock ? 8 * pass : 0);
1547    uint64_t set_va = va + (unlock ? 0 : 8 * pass);
1548 
1549    radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1550    radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1551                       COPY_DATA_WR_CONFIRM);
1552    radeon_emit(cs, 0); /* immediate */
1553    radeon_emit(cs, 0);
1554    radeon_emit(cs, unset_va);
1555    radeon_emit(cs, unset_va >> 32);
1556 
1557    radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1558    radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1559                       COPY_DATA_WR_CONFIRM);
1560    radeon_emit(cs, 1); /* immediate */
1561    radeon_emit(cs, 0);
1562    radeon_emit(cs, set_va);
1563    radeon_emit(cs, set_va >> 32);
1564 
1565    if (unlock) {
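           /* Release the perf counter mutex by writing 0 back to the lock dword. */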
1566       uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
1567 
1568       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1569       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | COPY_DATA_COUNT_SEL |
1570                          COPY_DATA_WR_CONFIRM);
1571       radeon_emit(cs, 0); /* immediate */
1572       radeon_emit(cs, 0);
1573       radeon_emit(cs, mutex_va);
1574       radeon_emit(cs, mutex_va >> 32);
1575    }
1576 
1577    assert(cs->cdw <= cdw);
1578 
1579    VkResult result = device->ws->cs_finalize(cs);
1580    if (result != VK_SUCCESS) {
1581       device->ws->cs_destroy(cs);
1582       return NULL;
1583    }
1584 
1585    /* All the casts are to avoid MSVC errors around pointer truncation in a non-taken
1586     * alternative.
1587     */
1588    if (p_atomic_cmpxchg((uintptr_t *)cs_ref, 0, (uintptr_t)cs) != 0) {
1589       device->ws->cs_destroy(cs);
1590    }
1591 
1592    return *cs_ref;
1593 }
1594 
1595 static void
1596 radv_get_shader_upload_sync_wait(struct radv_device *device, uint64_t shader_upload_seq,
1597                                  struct vk_sync_wait *out_sync_wait)
1598 {
1599    struct vk_semaphore *semaphore = vk_semaphore_from_handle(device->shader_upload_sem);
1600    struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);
1601    *out_sync_wait = (struct vk_sync_wait){
1602       .sync = sync,
1603       .wait_value = shader_upload_seq,
1604       .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
1605    };
1606 }
1607 
1608 static VkResult
1609 radv_queue_submit_normal(struct radv_queue *queue, struct vk_queue_submit *submission)
1610 {
1611    struct radv_device *device = radv_queue_device(queue);
1612    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1613    bool use_ace = false;
1614    bool use_perf_counters = false;
1615    VkResult result;
1616    uint64_t shader_upload_seq = 0;
1617    uint32_t wait_count = submission->wait_count;
1618    struct vk_sync_wait *waits = submission->waits;
1619 
1620    result = radv_update_preambles(&queue->state, device, submission->command_buffers, submission->command_buffer_count,
1621                                   &use_perf_counters, &use_ace);
1622    if (result != VK_SUCCESS)
1623       return result;
1624 
1625    if (use_ace) {
1626       result = radv_update_gang_preambles(queue);
1627       if (result != VK_SUCCESS)
1628          return result;
1629    }
1630 
1631    const unsigned cmd_buffer_count = submission->command_buffer_count;
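        /* With fault detection enabled, submit one cmdbuffer at a time so a hang can be attributed to a
         * specific one. Each cmdbuffer may also carry a gang (ACE) CS, hence the factor of two below.
         */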
1632    const unsigned max_cs_submission = radv_device_fault_detection_enabled(device) ? 1 : cmd_buffer_count;
1633    const unsigned cs_array_size = (use_ace ? 2 : 1) * MIN2(max_cs_submission, cmd_buffer_count);
1634 
1635    struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) * cs_array_size);
1636    if (!cs_array)
1637       return VK_ERROR_OUT_OF_HOST_MEMORY;
1638 
1639    if (radv_device_fault_detection_enabled(device))
1640       simple_mtx_lock(&device->trace_mtx);
1641 
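        /* Find the highest shader upload sequence number referenced by the submitted cmdbuffers. */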
1642    for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
1643       struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j];
1644       shader_upload_seq = MAX2(shader_upload_seq, cmd_buffer->shader_upload_seq);
1645    }
1646 
1647    if (shader_upload_seq > queue->last_shader_upload_seq) {
1648       /* Patch the wait array to add waiting for referenced shaders to upload. */
1649       struct vk_sync_wait *new_waits = malloc(sizeof(struct vk_sync_wait) * (wait_count + 1));
1650       if (!new_waits) {
1651          result = VK_ERROR_OUT_OF_HOST_MEMORY;
1652          goto fail;
1653       }
1654 
1655       memcpy(new_waits, submission->waits, sizeof(struct vk_sync_wait) * submission->wait_count);
1656       radv_get_shader_upload_sync_wait(device, shader_upload_seq, &new_waits[submission->wait_count]);
1657 
1658       waits = new_waits;
1659       wait_count += 1;
1660    }
1661 
1662    /* For fences on the same queue/VM, amdgpu doesn't wait until all processing is finished
1663     * before starting the next cmdbuffer, so we need to do the wait here.
1664     */
1665    const bool need_wait = wait_count > 0;
1666    unsigned num_initial_preambles = 0;
1667    unsigned num_continue_preambles = 0;
1668    unsigned num_postambles = 0;
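        /* Worst case per submission: the queue's own preamble, the perf counter lock, both gang wait
         * preambles and the follower's preamble (5); the perf counter unlock and both gang wait
         * postambles (3).
         */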
1669    struct radeon_cmdbuf *initial_preambles[5] = {0};
1670    struct radeon_cmdbuf *continue_preambles[5] = {0};
1671    struct radeon_cmdbuf *postambles[3] = {0};
1672 
1673    if (queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE) {
1674       initial_preambles[num_initial_preambles++] =
1675          need_wait ? queue->state.initial_full_flush_preamble_cs : queue->state.initial_preamble_cs;
1676 
1677       continue_preambles[num_continue_preambles++] = queue->state.continue_preamble_cs;
1678 
1679       if (use_perf_counters) {
1680          /* RADV only supports perf counters on the GFX queue currently. */
1681          assert(queue->state.qf == RADV_QUEUE_GENERAL);
1682 
1683          /* Create the lock/unlock CS. */
1684          struct radeon_cmdbuf *perf_ctr_lock_cs =
1685             radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, false);
1686          struct radeon_cmdbuf *perf_ctr_unlock_cs =
1687             radv_create_perf_counter_lock_cs(device, submission->perf_pass_index, true);
1688 
1689          if (!perf_ctr_lock_cs || !perf_ctr_unlock_cs) {
1690             result = VK_ERROR_OUT_OF_HOST_MEMORY;
1691             goto fail;
1692          }
1693 
1694          initial_preambles[num_initial_preambles++] = perf_ctr_lock_cs;
1695          continue_preambles[num_continue_preambles++] = perf_ctr_lock_cs;
1696          postambles[num_postambles++] = perf_ctr_unlock_cs;
1697       }
1698    }
1699 
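        /* Remember the counts that apply when a submission has no ACE follower. */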
1700    const unsigned num_1q_initial_preambles = num_initial_preambles;
1701    const unsigned num_1q_continue_preambles = num_continue_preambles;
1702    const unsigned num_1q_postambles = num_postambles;
1703 
1704    if (use_ace) {
1705       initial_preambles[num_initial_preambles++] = queue->state.gang_wait_preamble_cs;
1706       initial_preambles[num_initial_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1707       initial_preambles[num_initial_preambles++] =
1708          need_wait ? queue->follower_state->initial_full_flush_preamble_cs : queue->follower_state->initial_preamble_cs;
1709 
1710       continue_preambles[num_continue_preambles++] = queue->state.gang_wait_preamble_cs;
1711       continue_preambles[num_continue_preambles++] = queue->follower_state->gang_wait_preamble_cs;
1712       continue_preambles[num_continue_preambles++] = queue->follower_state->continue_preamble_cs;
1713 
1714       postambles[num_postambles++] = queue->follower_state->gang_wait_postamble_cs;
1715       postambles[num_postambles++] = queue->state.gang_wait_postamble_cs;
1716    }
1717 
1718    struct radv_winsys_submit_info submit = {
1719       .ip_type = radv_queue_ring(queue),
1720       .queue_index = queue->vk.index_in_family,
1721       .cs_array = cs_array,
1722       .cs_count = 0,
1723       .initial_preamble_count = num_1q_initial_preambles,
1724       .continue_preamble_count = num_1q_continue_preambles,
1725       .postamble_count = num_1q_postambles,
1726       .initial_preamble_cs = initial_preambles,
1727       .continue_preamble_cs = continue_preambles,
1728       .postamble_cs = postambles,
1729       .uses_shadow_regs = queue->state.uses_shadow_regs,
1730    };
1731 
1732    for (uint32_t j = 0, advance; j < cmd_buffer_count; j += advance) {
1733       advance = MIN2(max_cs_submission, cmd_buffer_count - j);
1734       const bool last_submit = j + advance == cmd_buffer_count;
1735       bool submit_ace = false;
1736       unsigned num_submitted_cs = 0;
1737 
1738       if (radv_device_fault_detection_enabled(device))
1739          device->trace_data->primary_id = 0;
1740 
1741       struct radeon_cmdbuf *chainable = NULL;
1742       struct radeon_cmdbuf *chainable_ace = NULL;
1743 
1744       /* Add CS from submitted command buffers. */
1745       for (unsigned c = 0; c < advance; ++c) {
1746          struct radv_cmd_buffer *cmd_buffer = (struct radv_cmd_buffer *)submission->command_buffers[j + c];
1747          assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1748          const bool can_chain_next = !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
1749 
1750          /* Follower needs to be before the gang leader because the last CS must match the queue's IP type. */
1751          if (cmd_buffer->gang.cs) {
1752             device->ws->cs_unchain(cmd_buffer->gang.cs);
1753             if (!chainable_ace || !device->ws->cs_chain(chainable_ace, cmd_buffer->gang.cs, false)) {
1754                cs_array[num_submitted_cs++] = cmd_buffer->gang.cs;
1755 
1756                /* Prevent chaining the gang leader when the follower couldn't be chained.
1757                 * Otherwise, they would be in the wrong order.
1758                 */
1759                chainable = NULL;
1760             }
1761 
1762             chainable_ace = can_chain_next ? cmd_buffer->gang.cs : NULL;
1763             submit_ace = true;
1764          }
1765 
1766          device->ws->cs_unchain(cmd_buffer->cs);
1767          if (!chainable || !device->ws->cs_chain(chainable, cmd_buffer->cs, queue->state.uses_shadow_regs)) {
1768             /* Don't submit empty command buffers to the video rings. */
1769             if ((radv_queue_ring(queue) != AMD_IP_VCN_ENC && radv_queue_ring(queue) != AMD_IP_UVD) ||
1770                 cmd_buffer->cs->cdw != 0)
1771                cs_array[num_submitted_cs++] = cmd_buffer->cs;
1772          }
1773 
1774          chainable = can_chain_next ? cmd_buffer->cs : NULL;
1775       }
1776 
1777       submit.cs_count = num_submitted_cs;
1778       submit.initial_preamble_count = submit_ace ? num_initial_preambles : num_1q_initial_preambles;
1779       submit.continue_preamble_count = submit_ace ? num_continue_preambles : num_1q_continue_preambles;
1780       submit.postamble_count = submit_ace ? num_postambles : num_1q_postambles;
1781 
1782       result = device->ws->cs_submit(ctx, &submit, j == 0 ? wait_count : 0, waits,
1783                                      last_submit ? submission->signal_count : 0, submission->signals);
1784 
1785       if (result != VK_SUCCESS)
1786          goto fail;
1787 
1788       if (radv_device_fault_detection_enabled(device)) {
1789          result = radv_check_gpu_hangs(queue, &submit);
1790       }
1791 
1792       if (device->tma_bo) {
1793          radv_check_trap_handler(queue);
1794       }
1795 
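           /* Later chunks of this submission don't need the full cache flush preamble. */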
1796       initial_preambles[0] = queue->state.initial_preamble_cs;
1797       initial_preambles[1] = !use_ace ? NULL : queue->follower_state->initial_preamble_cs;
1798    }
1799 
1800    queue->last_shader_upload_seq = MAX2(queue->last_shader_upload_seq, shader_upload_seq);
1801 
1802    radv_dump_printf_data(device, stdout);
1803 
1804 fail:
1805    free(cs_array);
1806    if (waits != submission->waits)
1807       free(waits);
1808    if (radv_device_fault_detection_enabled(device))
1809       simple_mtx_unlock(&device->trace_mtx);
1810 
1811    return result;
1812 }
1813 
1814 static void
1815 radv_report_gpuvm_fault(struct radv_device *device)
1816 {
1817    const struct radv_physical_device *pdev = radv_device_physical(device);
1818    struct radv_winsys_gpuvm_fault_info fault_info = {0};
1819 
1820    if (!radv_vm_fault_occurred(device, &fault_info))
1821       return;
1822 
1823    fprintf(stderr, "radv: GPUVM fault detected at address 0x%08" PRIx64 ".\n", fault_info.addr);
1824    ac_print_gpuvm_fault_status(stderr, pdev->info.gfx_level, fault_info.status);
1825 }
1826 
1827 static VkResult
1828 radv_queue_sparse_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1829 {
1830    struct radv_queue *queue = (struct radv_queue *)vqueue;
1831    struct radv_device *device = radv_queue_device(queue);
1832    VkResult result;
1833 
1834    result = radv_queue_submit_bind_sparse_memory(device, submission);
1835    if (result != VK_SUCCESS)
1836       goto fail;
1837 
1838    /* We do a CPU wait here, in part to avoid more winsys mechanisms. With the likely future kernel
1839     * explicit-sync mechanism, we'd need a CPU wait anyway. This hasn't been a perf issue so far, but
1840     * it does mean the queue must always have its submission thread enabled. */
1841    result = vk_sync_wait_many(&device->vk, submission->wait_count, submission->waits, 0, UINT64_MAX);
1842    if (result != VK_SUCCESS)
1843       goto fail;
1844 
1845    /* Ignore all the command buffers; they're necessarily empty anyway. */
1846 
1847    for (unsigned i = 0; i < submission->signal_count; ++i) {
1848       result = vk_sync_signal(&device->vk, submission->signals[i].sync, submission->signals[i].signal_value);
1849       if (result != VK_SUCCESS)
1850          goto fail;
1851    }
1852 
1853 fail:
1854    if (result != VK_SUCCESS) {
1855       /* When something bad happened during the submission, such as
1856        * an out of memory issue, it might be hard to recover from
1857        * this inconsistent state. To avoid this sort of problem, we
1858        * assume that we are in a really bad situation and return
1859        * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1860        * to submit the same job again to this device.
1861        */
1862       radv_report_gpuvm_fault(device);
1863       result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1864    }
1865    return result;
1866 }
1867 
1868 static VkResult
1869 radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
1870 {
1871    struct radv_queue *queue = (struct radv_queue *)vqueue;
1872    struct radv_device *device = radv_queue_device(queue);
1873    const struct radv_physical_device *pdev = radv_device_physical(device);
1874    VkResult result;
1875 
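        /* Without a dedicated sparse queue, sparse binds are handled inline on this queue;
         * otherwise the binds must have been routed to the sparse queue already.
         */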
1876    if (!radv_sparse_queue_enabled(pdev)) {
1877       result = radv_queue_submit_bind_sparse_memory(device, submission);
1878       if (result != VK_SUCCESS)
1879          goto fail;
1880    } else {
1881       assert(!submission->buffer_bind_count && !submission->image_bind_count && !submission->image_opaque_bind_count);
1882    }
1883 
1884    if (!submission->command_buffer_count && !submission->wait_count && !submission->signal_count)
1885       return VK_SUCCESS;
1886 
1887    if (!submission->command_buffer_count) {
1888       result = radv_queue_submit_empty(queue, submission);
1889    } else {
1890       result = radv_queue_submit_normal(queue, submission);
1891    }
1892 
1893 fail:
1894    if (result != VK_SUCCESS) {
1895       /* When something bad happened during the submission, such as
1896        * an out of memory issue, it might be hard to recover from
1897        * this inconsistent state. To avoid this sort of problem, we
1898        * assume that we are in a really bad situation and return
1899        * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
1900        * to submit the same job again to this device.
1901        */
1902       radv_report_gpuvm_fault(device);
1903       result = vk_device_set_lost(&device->vk, "vkQueueSubmit() failed");
1904    }
1905    return result;
1906 }
1907 
1908 bool
1909 radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
1910 {
1911    struct radv_device *device = radv_queue_device(queue);
1912    struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1913    struct radv_winsys_submit_info submit = {
1914       .ip_type = radv_queue_ring(queue),
1915       .queue_index = queue->vk.index_in_family,
1916       .cs_array = &cs,
1917       .cs_count = 1,
1918    };
1919 
1920    VkResult result = device->ws->cs_submit(ctx, &submit, 0, NULL, 0, NULL);
1921    if (result != VK_SUCCESS)
1922       return false;
1923 
1924    return true;
1925 }
1926 
1927 int
1928 radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
1929                 const VkDeviceQueueCreateInfo *create_info,
1930                 const VkDeviceQueueGlobalPriorityCreateInfoKHR *global_priority)
1931 {
1932    const struct radv_physical_device *pdev = radv_device_physical(device);
1933 
1934    queue->priority = radv_get_queue_global_priority(global_priority);
1935    queue->hw_ctx = device->hw_ctx[queue->priority];
1936    queue->state.qf = vk_queue_to_radv(pdev, create_info->queueFamilyIndex);
1937 
1938    VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
1939    if (result != VK_SUCCESS)
1940       return result;
1941 
1942    queue->state.uses_shadow_regs = device->uses_shadow_regs && queue->state.qf == RADV_QUEUE_GENERAL;
1943    if (queue->state.uses_shadow_regs) {
1944       result = radv_create_shadow_regs_preamble(device, &queue->state);
1945       if (result != VK_SUCCESS)
1946          goto fail;
1947       result = radv_init_shadowed_regs_buffer_state(device, queue);
1948       if (result != VK_SUCCESS)
1949          goto fail;
1950    }
1951 
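        /* Sparse queues never submit to the HW; they go through the sparse path, which does a CPU wait
         * and therefore needs the submit thread to be always enabled.
         */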
1952    if (queue->state.qf == RADV_QUEUE_SPARSE) {
1953       queue->vk.driver_submit = radv_queue_sparse_submit;
1954       vk_queue_enable_submit_thread(&queue->vk);
1955    } else {
1956       queue->vk.driver_submit = radv_queue_submit;
1957    }
1958    return VK_SUCCESS;
1959 fail:
1960    vk_queue_finish(&queue->vk);
1961    return result;
1962 }
1963 
1964 static void
1965 radv_queue_state_finish(struct radv_queue_state *queue, struct radv_device *device)
1966 {
1967    radv_destroy_shadow_regs_preamble(device, queue, device->ws);
1968    if (queue->initial_full_flush_preamble_cs)
1969       device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1970    if (queue->initial_preamble_cs)
1971       device->ws->cs_destroy(queue->initial_preamble_cs);
1972    if (queue->continue_preamble_cs)
1973       device->ws->cs_destroy(queue->continue_preamble_cs);
1974    if (queue->gang_wait_preamble_cs)
1975       device->ws->cs_destroy(queue->gang_wait_preamble_cs);
1976    if (queue->gang_wait_postamble_cs)
1977       device->ws->cs_destroy(queue->gang_wait_postamble_cs);
1978    if (queue->descriptor_bo)
1979       radv_bo_destroy(device, NULL, queue->descriptor_bo);
1980    if (queue->scratch_bo) {
1981       radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
1982       radv_bo_destroy(device, NULL, queue->scratch_bo);
1983    }
1984    if (queue->esgs_ring_bo) {
1985       radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
1986       radv_bo_destroy(device, NULL, queue->esgs_ring_bo);
1987    }
1988    if (queue->gsvs_ring_bo) {
1989       radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
1990       radv_bo_destroy(device, NULL, queue->gsvs_ring_bo);
1991    }
1992    if (queue->tess_rings_bo) {
1993       radv_rmv_log_command_buffer_bo_destroy(device, queue->tess_rings_bo);
1994       radv_bo_destroy(device, NULL, queue->tess_rings_bo);
1995    }
1996    if (queue->task_rings_bo) {
1997       radv_rmv_log_command_buffer_bo_destroy(device, queue->task_rings_bo);
1998       radv_bo_destroy(device, NULL, queue->task_rings_bo);
1999    }
2000    if (queue->mesh_scratch_ring_bo) {
2001       radv_rmv_log_command_buffer_bo_destroy(device, queue->mesh_scratch_ring_bo);
2002       radv_bo_destroy(device, NULL, queue->mesh_scratch_ring_bo);
2003    }
2004    if (queue->attr_ring_bo) {
2005       radv_rmv_log_command_buffer_bo_destroy(device, queue->attr_ring_bo);
2006       radv_bo_destroy(device, NULL, queue->attr_ring_bo);
2007    }
2008    if (queue->gds_bo) {
2009       device->ws->buffer_make_resident(device->ws, queue->gds_bo, false);
2010       radv_bo_destroy(device, NULL, queue->gds_bo);
2011    }
2012    if (queue->gds_oa_bo) {
2013       device->ws->buffer_make_resident(device->ws, queue->gds_oa_bo, false);
2014       radv_bo_destroy(device, NULL, queue->gds_oa_bo);
2015    }
2016    if (queue->compute_scratch_bo) {
2017       radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
2018       radv_bo_destroy(device, NULL, queue->compute_scratch_bo);
2019    }
2020 }
2021 
2022 void
2023 radv_queue_finish(struct radv_queue *queue)
2024 {
2025    struct radv_device *device = radv_queue_device(queue);
2026 
2027    if (queue->follower_state) {
2028       /* The task rings BO is shared with the leader's queue state; clear it to prevent a double free. */
2029       queue->follower_state->task_rings_bo = NULL;
2030 
2031       /* Clean up the internal ACE queue state. */
2032       radv_queue_state_finish(queue->follower_state, device);
2033       free(queue->follower_state);
2034    }
2035 
2036    if (queue->gang_sem_bo)
2037       radv_bo_destroy(device, &queue->vk.base, queue->gang_sem_bo);
2038 
2039    radv_queue_state_finish(&queue->state, device);
2040    vk_queue_finish(&queue->vk);
2041 }
2042 
2043 enum amd_ip_type
2044 radv_queue_ring(const struct radv_queue *queue)
2045 {
2046    struct radv_device *device = radv_queue_device(queue);
2047    const struct radv_physical_device *pdev = radv_device_physical(device);
2048    return radv_queue_family_to_ring(pdev, queue->state.qf);
2049 }
2050 
2051 enum amd_ip_type
2052 radv_queue_family_to_ring(const struct radv_physical_device *pdev, enum radv_queue_family f)
2053 {
2054    switch (f) {
2055    case RADV_QUEUE_GENERAL:
2056       return AMD_IP_GFX;
2057    case RADV_QUEUE_COMPUTE:
2058       return AMD_IP_COMPUTE;
2059    case RADV_QUEUE_TRANSFER:
2060       return AMD_IP_SDMA;
2061    case RADV_QUEUE_VIDEO_DEC:
2062       return pdev->vid_decode_ip;
2063    case RADV_QUEUE_VIDEO_ENC:
2064       return AMD_IP_VCN_ENC;
2065    default:
2066       unreachable("Unknown queue family");
2067    }
2068 }
2069