/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "meta/radv_meta.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "nir/nir_serialize.h"
#include "nir/radv_nir.h"
#include "spirv/nir_spirv.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_time.h"
#include "util/u_atomic.h"
#include "radv_cs.h"
#include "radv_debug.h"
#include "radv_pipeline_binary.h"
#include "radv_pipeline_cache.h"
#include "radv_rmv.h"
#include "radv_shader.h"
#include "radv_shader_args.h"
#include "vk_nir_convert_ycbcr.h"
#include "vk_pipeline.h"
#include "vk_render_pass.h"
#include "vk_util.h"

#include "util/u_debug.h"
#include "ac_binary.h"
#include "ac_nir.h"
#include "ac_shader_util.h"
#include "aco_interface.h"
#include "sid.h"
#include "vk_format.h"

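/* Compute the COMPUTE_RESOURCE_LIMITS value for a compute shader. On GFX10+,
 * a threadgroup that fits in a single wave leaves room to pack two
 * threadgroups per compute unit, which can improve occupancy.
 */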
uint32_t
radv_get_compute_resource_limits(const struct radv_physical_device *pdev, const struct radv_shader_info *info)
{
   unsigned threads_per_threadgroup;
   unsigned threadgroups_per_cu = 1;
   unsigned waves_per_threadgroup;
   unsigned max_waves_per_sh = 0;

   /* Calculate best compute resource limits. */
   threads_per_threadgroup = info->cs.block_size[0] * info->cs.block_size[1] * info->cs.block_size[2];
   waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup, info->wave_size);

   if (pdev->info.gfx_level >= GFX10 && waves_per_threadgroup == 1)
      threadgroups_per_cu = 2;

   return ac_get_compute_resource_limits(&pdev->info, waves_per_threadgroup, max_waves_per_sh, threadgroups_per_cu);
}

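/* Collect the metadata needed to dispatch this compute shader without the
 * full pipeline object (used for device-generated commands): the wave size
 * and the user SGPR locations for the grid size, push constants and indirect
 * descriptor sets.
 */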
void
radv_get_compute_shader_metadata(const struct radv_device *device, const struct radv_shader *cs,
                                 struct radv_compute_pipeline_metadata *metadata)
{
   uint32_t upload_sgpr = 0, inline_sgpr = 0;

   memset(metadata, 0, sizeof(*metadata));

   metadata->wave32 = cs->info.wave_size == 32;

   metadata->grid_base_sgpr = radv_get_user_sgpr(cs, AC_UD_CS_GRID_SIZE);

   upload_sgpr = radv_get_user_sgpr(cs, AC_UD_PUSH_CONSTANTS);
   inline_sgpr = radv_get_user_sgpr(cs, AC_UD_INLINE_PUSH_CONSTANTS);

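   /* Pack both user SGPR indices into one dword: the upload SGPR in the low
    * 16 bits and the inline push constant SGPR in the high 16 bits. As an
    * illustration, upload_sgpr = 2 and inline_sgpr = 3 would give 0x00030002.
    */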
   metadata->push_const_sgpr = upload_sgpr | (inline_sgpr << 16);
   metadata->inline_push_const_mask = cs->info.inline_push_constant_mask;

   metadata->indirect_desc_sets_sgpr = radv_get_user_sgpr(cs, AC_UD_INDIRECT_DESCRIPTOR_SETS);
}

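/* Initialize the pipeline state that is derived from the pipeline layout and
 * the compiled compute shader.
 */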
void
radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const struct radv_pipeline_layout *layout,
                           struct radv_shader *shader)
{
   pipeline->base.need_indirect_descriptor_sets |= radv_shader_need_indirect_descriptor_sets(shader);

   pipeline->base.push_constant_size = layout->push_constant_size;
   pipeline->base.dynamic_offset_count = layout->dynamic_offset_count;
}

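/* Run the full compute shader compile: SPIR-V -> NIR, NIR optimization and
 * lowering, shader info/argument gathering, and finally NIR -> AMD assembly.
 * Returns the created radv_shader and stores the raw binary in *cs_binary.
 */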
struct radv_shader *
radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, struct radv_shader_stage *cs_stage,
                bool keep_executable_info, bool keep_statistic_info, bool is_internal,
                struct radv_shader_binary **cs_binary)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);

   struct radv_shader *cs_shader;

   /* Compile SPIR-V shader to NIR. */
   cs_stage->nir = radv_shader_spirv_to_nir(device, cs_stage, NULL, is_internal);

   radv_optimize_nir(cs_stage->nir, cs_stage->key.optimisations_disabled);

   /* Gather info again; information such as outputs_read can be out-of-date. */
   nir_shader_gather_info(cs_stage->nir, nir_shader_get_entrypoint(cs_stage->nir));

   /* Run the shader info pass. */
   radv_nir_shader_info_init(cs_stage->stage, MESA_SHADER_NONE, &cs_stage->info);
   radv_nir_shader_info_pass(device, cs_stage->nir, &cs_stage->layout, &cs_stage->key, NULL, RADV_PIPELINE_COMPUTE,
                             false, &cs_stage->info);

   radv_declare_shader_args(device, NULL, &cs_stage->info, MESA_SHADER_COMPUTE, MESA_SHADER_NONE, &cs_stage->args);

   cs_stage->info.user_sgprs_locs = cs_stage->args.user_sgprs_locs;
   cs_stage->info.inline_push_constant_mask = cs_stage->args.ac.inline_push_const_mask;

   /* Postprocess NIR. */
   radv_postprocess_nir(device, NULL, cs_stage);

   bool dump_shader = radv_can_dump_shader(device, cs_stage->nir, false);

   if (dump_shader) {
      simple_mtx_lock(&instance->shader_dump_mtx);
      nir_print_shader(cs_stage->nir, stderr);
   }

   /* Compile NIR shader to AMD assembly. */
   *cs_binary =
      radv_shader_nir_to_asm(device, cs_stage, &cs_stage->nir, 1, NULL, keep_executable_info, keep_statistic_info);

   cs_shader = radv_shader_create(device, cache, *cs_binary, keep_executable_info || dump_shader);

   radv_shader_generate_debug_info(device, dump_shader, keep_executable_info, *cs_binary, cs_shader, &cs_stage->nir, 1,
                                   &cs_stage->info);

   if (dump_shader)
      simple_mtx_unlock(&instance->shader_dump_mtx);

   if (keep_executable_info && cs_stage->spirv.size) {
      cs_shader->spirv = malloc(cs_stage->spirv.size);
      memcpy(cs_shader->spirv, cs_stage->spirv.data, cs_stage->spirv.size);
      cs_shader->spirv_size = cs_stage->spirv.size;
   }

   return cs_shader;
}

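/* Hash everything that uniquely identifies a compute pipeline (pipeline
 * layout, shader stage and creation flags) so the result can serve as a
 * pipeline cache key.
 */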
void
radv_compute_pipeline_hash(const struct radv_device *device, const VkComputePipelineCreateInfo *pCreateInfo,
                           unsigned char *hash)
{
   VkPipelineCreateFlags2KHR create_flags = vk_compute_pipeline_create_flags(pCreateInfo);
   VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *sinfo = &pCreateInfo->stage;
   struct mesa_sha1 ctx;

   struct radv_shader_stage_key stage_key =
      radv_pipeline_get_shader_key(device, sinfo, create_flags, pCreateInfo->pNext);

   _mesa_sha1_init(&ctx);
   radv_pipeline_hash(device, pipeline_layout, &ctx);
   radv_pipeline_hash_shader_stage(create_flags, sinfo, &stage_key, &ctx);
   _mesa_sha1_final(&ctx, hash);
}

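/* Obtain the compute shader for a pipeline, either from the shaders cache or
 * by compiling it from scratch, and record pipeline creation feedback for the
 * application.
 */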
static VkResult
radv_compute_pipeline_compile(const VkComputePipelineCreateInfo *pCreateInfo, struct radv_compute_pipeline *pipeline,
                              struct radv_pipeline_layout *pipeline_layout, struct radv_device *device,
                              struct vk_pipeline_cache *cache, const VkPipelineShaderStageCreateInfo *pStage,
                              const VkPipelineCreationFeedbackCreateInfo *creation_feedback)
{
   struct radv_shader_binary *cs_binary = NULL;
   bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.create_flags);
   bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.create_flags);
   struct radv_shader_stage cs_stage = {0};
   VkPipelineCreationFeedback pipeline_feedback = {
      .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
   };
   bool skip_shaders_cache = false;
   VkResult result = VK_SUCCESS;

   int64_t pipeline_start = os_time_get_nano();

   radv_compute_pipeline_hash(device, pCreateInfo, pipeline->base.sha1);

   pipeline->base.pipeline_hash = *(uint64_t *)pipeline->base.sha1;

   /* Skip the shaders cache when any of the below are true:
    * - shaders are captured because it's for debugging purposes
    * - binaries are captured for later use
    */
   if (keep_executable_info || (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR)) {
      skip_shaders_cache = true;
   }

   bool found_in_application_cache = true;
   if (!skip_shaders_cache &&
       radv_compute_pipeline_cache_search(device, cache, pipeline, &found_in_application_cache)) {
      if (found_in_application_cache)
         pipeline_feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
      result = VK_SUCCESS;
      goto done;
   }

   if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
      return VK_PIPELINE_COMPILE_REQUIRED;

   int64_t stage_start = os_time_get_nano();

   const struct radv_shader_stage_key stage_key =
      radv_pipeline_get_shader_key(device, &pCreateInfo->stage, pipeline->base.create_flags, pCreateInfo->pNext);

   radv_pipeline_stage_init(pipeline->base.create_flags, pStage, pipeline_layout, &stage_key, &cs_stage);

   pipeline->base.shaders[MESA_SHADER_COMPUTE] = radv_compile_cs(
      device, cache, &cs_stage, keep_executable_info, keep_statistic_info, pipeline->base.is_internal, &cs_binary);

   cs_stage.feedback.duration += os_time_get_nano() - stage_start;

   if (!skip_shaders_cache) {
      radv_pipeline_cache_insert(device, cache, &pipeline->base);
   }

   free(cs_binary);
   if (radv_can_dump_shader_stats(device, cs_stage.nir)) {
      radv_dump_shader_stats(device, &pipeline->base, pipeline->base.shaders[MESA_SHADER_COMPUTE], MESA_SHADER_COMPUTE,
                             stderr);
   }
   ralloc_free(cs_stage.nir);

done:
   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   if (creation_feedback) {
      *creation_feedback->pPipelineCreationFeedback = pipeline_feedback;

      if (creation_feedback->pipelineStageCreationFeedbackCount) {
         assert(creation_feedback->pipelineStageCreationFeedbackCount == 1);
         creation_feedback->pPipelineStageCreationFeedbacks[0] = cs_stage.feedback;
      }
   }

   return result;
}

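/* Import a previously exported pipeline binary (VK_KHR_pipeline_binary)
 * instead of compiling: deserialize the single compute shader blob and reuse
 * its key as the pipeline hash.
 */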
static VkResult
radv_compute_pipeline_import_binary(struct radv_device *device, struct radv_compute_pipeline *pipeline,
                                    const VkPipelineBinaryInfoKHR *binary_info)
{
   VK_FROM_HANDLE(radv_pipeline_binary, pipeline_binary, binary_info->pPipelineBinaries[0]);
   struct radv_shader *shader;
   struct blob_reader blob;

   assert(binary_info->binaryCount == 1);

   blob_reader_init(&blob, pipeline_binary->data, pipeline_binary->size);

   shader = radv_shader_deserialize(device, pipeline_binary->key, sizeof(pipeline_binary->key), &blob);
   if (!shader)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   pipeline->base.shaders[MESA_SHADER_COMPUTE] = shader;

   pipeline->base.pipeline_hash = *(uint64_t *)pipeline_binary->key;

   return VK_SUCCESS;
}

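/* Create a single compute pipeline: allocate the object, obtain the shader
 * (from an imported binary or by compiling), and, for indirectly bindable
 * pipelines (VK_NV_device_generated_commands_compute), pre-record the shader
 * register state into a small host command buffer.
 */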
VkResult
radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkComputePipelineCreateInfo *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
   VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
   struct radv_compute_pipeline *pipeline;
   VkResult result;

   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL) {
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_COMPUTE);
   pipeline->base.create_flags = vk_compute_pipeline_create_flags(pCreateInfo);
   pipeline->base.is_internal = _cache == device->meta_state.cache;

   const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
      vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);

   const VkPipelineBinaryInfoKHR *binary_info = vk_find_struct_const(pCreateInfo->pNext, PIPELINE_BINARY_INFO_KHR);

   if (binary_info && binary_info->binaryCount > 0) {
      result = radv_compute_pipeline_import_binary(device, pipeline, binary_info);
   } else {
      result = radv_compute_pipeline_compile(pCreateInfo, pipeline, pipeline_layout, device, cache, &pCreateInfo->stage,
                                             creation_feedback);
   }

   if (result != VK_SUCCESS) {
      radv_pipeline_destroy(device, &pipeline->base, pAllocator);
      return result;
   }

   radv_compute_pipeline_init(pipeline, pipeline_layout, pipeline->base.shaders[MESA_SHADER_COMPUTE]);

   if (pipeline->base.create_flags & VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV) {
      const VkComputePipelineIndirectBufferInfoNV *indirect_buffer =
         vk_find_struct_const(pCreateInfo->pNext, COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV);
      struct radv_shader *shader = pipeline->base.shaders[MESA_SHADER_COMPUTE];
      const struct radv_physical_device *pdev = radv_device_physical(device);
      struct radeon_cmdbuf *cs = &pipeline->indirect.cs;

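      /* Record the shader's dispatch register state into a small host-side
       * command buffer; 32 dwords is assumed to be enough for
       * radv_emit_compute_shader(). */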
      cs->reserved_dw = cs->max_dw = 32;
      cs->buf = malloc(cs->max_dw * 4);
      if (!cs->buf) {
         radv_pipeline_destroy(device, &pipeline->base, pAllocator);
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      radv_emit_compute_shader(pdev, cs, shader);

      pipeline->indirect.va = indirect_buffer->deviceAddress;
      pipeline->indirect.size = indirect_buffer->size;

      /* vkCmdUpdatePipelineIndirectBufferNV() can be called on any queue supporting transfer
       * operations and it's not required to call it on the same queue as the DGC execute. Because
       * it's not possible to know if the compute shader uses scratch when DGC execute is called,
       * the only solution is to gather the max scratch size of all indirect pipelines.
       */
      simple_mtx_lock(&device->compute_scratch_mtx);
      device->compute_scratch_size_per_wave =
         MAX2(device->compute_scratch_size_per_wave, shader->config.scratch_bytes_per_wave);
      device->compute_scratch_waves = MAX2(device->compute_scratch_waves, radv_get_max_scratch_waves(device, shader));
      simple_mtx_unlock(&device->compute_scratch_mtx);
   }

   *pPipeline = radv_pipeline_to_handle(&pipeline->base);
   radv_rmv_log_compute_pipeline_create(device, &pipeline->base, pipeline->base.is_internal);
   return VK_SUCCESS;
}

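/* Create pCreateInfos[0..count-1] one by one. On failure, keep going unless
 * VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR is set, and make sure
 * every remaining entry of pPipelines is VK_NULL_HANDLE, as the Vulkan spec
 * requires.
 */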
static VkResult
radv_create_compute_pipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
                              const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                              VkPipeline *pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      VkResult r;
      r = radv_compute_pipeline_create(_device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;

         VkPipelineCreateFlagBits2KHR create_flags = vk_compute_pipeline_create_flags(&pCreateInfos[i]);
         if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
            break;
      }
   }

   for (; i < count; ++i)
      pPipelines[i] = VK_NULL_HANDLE;

   return result;
}

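/* Release the resources owned by a compute pipeline: the compute shader
 * reference and the host command buffer used by indirectly bindable
 * pipelines (cs->buf is NULL otherwise, so free() is safe).
 */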
void
radv_destroy_compute_pipeline(struct radv_device *device, struct radv_compute_pipeline *pipeline)
{
   struct radeon_cmdbuf *cs = &pipeline->indirect.cs;

   if (pipeline->base.shaders[MESA_SHADER_COMPUTE])
      radv_shader_unref(device, pipeline->base.shaders[MESA_SHADER_COMPUTE]);

   free(cs->buf);
}

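/* Vulkan entrypoint for vkCreateComputePipelines(). */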
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
                            const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipelines)
{
   return radv_create_compute_pipelines(_device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
}
389