xref: /aosp_15_r20/external/mesa3d/src/vulkan/runtime/vk_shader.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2024 Collabora, Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "vk_shader.h"
25 
26 #include "vk_alloc.h"
27 #include "vk_command_buffer.h"
28 #include "vk_common_entrypoints.h"
29 #include "vk_descriptor_set_layout.h"
30 #include "vk_device.h"
31 #include "vk_nir.h"
32 #include "vk_physical_device.h"
33 #include "vk_pipeline.h"
34 
35 #include "util/mesa-sha1.h"
36 
37 void *
38 vk_shader_zalloc(struct vk_device *device,
39                  const struct vk_shader_ops *ops,
40                  gl_shader_stage stage,
41                  const VkAllocationCallbacks *alloc,
42                  size_t size)
43 {
44    /* For internal allocations, we need to allocate from the device scope
45     * because they might be put in pipeline caches.  Importantly, it is
46     * impossible for the client to get at this pointer and we apply this
47     * heuristic before we account for allocation fallbacks so this will only
48     * ever happen for internal shader objects.
49     */
50    const VkSystemAllocationScope alloc_scope =
51       alloc == &device->alloc ? VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
52                               : VK_SYSTEM_ALLOCATION_SCOPE_OBJECT;
53 
54    struct vk_shader *shader = vk_zalloc2(&device->alloc, alloc, size, 8,
55                                          alloc_scope);
56    if (shader == NULL)
57       return NULL;
58 
59    vk_object_base_init(device, &shader->base, VK_OBJECT_TYPE_SHADER_EXT);
60    shader->ops = ops;
61    shader->stage = stage;
62 
63    return shader;
64 }
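
/* Typical driver-side usage is roughly the following sketch, where
 * "drv_shader" and "drv_shader_ops" are hypothetical driver-specific names
 * and the driver's shader struct embeds struct vk_shader as its first
 * member:
 *
 *    struct drv_shader *shader =
 *       vk_shader_zalloc(device, &drv_shader_ops, stage, pAllocator,
 *                        sizeof(struct drv_shader));
 *    if (shader == NULL)
 *       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 */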
65 
66 void
67 vk_shader_free(struct vk_device *device,
68                const VkAllocationCallbacks *alloc,
69                struct vk_shader *shader)
70 {
71    vk_object_base_finish(&shader->base);
72    vk_free2(&device->alloc, alloc, shader);
73 }
74 
75 int
76 vk_shader_cmp_graphics_stages(gl_shader_stage a, gl_shader_stage b)
77 {
78    static const int stage_order[MESA_SHADER_MESH + 1] = {
79       [MESA_SHADER_VERTEX] = 1,
80       [MESA_SHADER_TESS_CTRL] = 2,
81       [MESA_SHADER_TESS_EVAL] = 3,
82       [MESA_SHADER_GEOMETRY] = 4,
83       [MESA_SHADER_TASK] = 5,
84       [MESA_SHADER_MESH] = 6,
85       [MESA_SHADER_FRAGMENT] = 7,
86    };
87 
88    assert(a < ARRAY_SIZE(stage_order) && stage_order[a] > 0);
89    assert(b < ARRAY_SIZE(stage_order) && stage_order[b] > 0);
90 
91    return stage_order[a] - stage_order[b];
92 }
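
/* For example, stage_order maps MESA_SHADER_VERTEX to 1 and
 * MESA_SHADER_FRAGMENT to 7, so vk_shader_cmp_graphics_stages(vertex,
 * fragment) is negative and sorting with this comparator (see
 * cmp_stage_idx() and the qsort() call in vk_common_CreateShadersEXT())
 * yields pipeline order with the fragment shader last.
 */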
93 
94 struct stage_idx {
95    gl_shader_stage stage;
96    uint32_t idx;
97 };
98 
99 static int
100 cmp_stage_idx(const void *_a, const void *_b)
101 {
102    const struct stage_idx *a = _a, *b = _b;
103    return vk_shader_cmp_graphics_stages(a->stage, b->stage);
104 }
105 
106 static nir_shader *
107 vk_shader_to_nir(struct vk_device *device,
108                  const VkShaderCreateInfoEXT *info,
109                  const struct vk_pipeline_robustness_state *rs)
110 {
111    const struct vk_device_shader_ops *ops = device->shader_ops;
112 
113    const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);
114    const nir_shader_compiler_options *nir_options =
115       ops->get_nir_options(device->physical, stage, rs);
116    struct spirv_to_nir_options spirv_options =
117       ops->get_spirv_options(device->physical, stage, rs);
118 
119    enum gl_subgroup_size subgroup_size = vk_get_subgroup_size(
120       vk_spirv_version(info->pCode, info->codeSize),
121       stage, info->pNext,
122       info->flags & VK_SHADER_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT,
123       info->flags & VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
124 
125    nir_shader *nir = vk_spirv_to_nir(device,
126                                      info->pCode, info->codeSize,
127                                      stage, info->pName,
128                                      subgroup_size,
129                                      info->pSpecializationInfo,
130                                      &spirv_options, nir_options,
131                                      false /* internal */, NULL);
132    if (nir == NULL)
133       return NULL;
134 
135    if (ops->preprocess_nir != NULL)
136       ops->preprocess_nir(device->physical, nir);
137 
138    return nir;
139 }
140 
141 struct set_layouts {
142    struct vk_descriptor_set_layout *set_layouts[MESA_VK_MAX_DESCRIPTOR_SETS];
143 };
144 
145 static void
146 vk_shader_compile_info_init(struct vk_shader_compile_info *info,
147                             struct set_layouts *set_layouts,
148                             const VkShaderCreateInfoEXT *vk_info,
149                             const struct vk_pipeline_robustness_state *rs,
150                             nir_shader *nir)
151 {
152    for (uint32_t sl = 0; sl < vk_info->setLayoutCount; sl++) {
153       set_layouts->set_layouts[sl] =
154          vk_descriptor_set_layout_from_handle(vk_info->pSetLayouts[sl]);
155    }
156 
157    *info = (struct vk_shader_compile_info) {
158       .stage = nir->info.stage,
159       .flags = vk_info->flags,
160       .next_stage_mask = vk_info->nextStage,
161       .nir = nir,
162       .robustness = rs,
163       .set_layout_count = vk_info->setLayoutCount,
164       .set_layouts = set_layouts->set_layouts,
165       .push_constant_range_count = vk_info->pushConstantRangeCount,
166       .push_constant_ranges = vk_info->pPushConstantRanges,
167    };
168 }
169 
170 PRAGMA_DIAGNOSTIC_PUSH
171 PRAGMA_DIAGNOSTIC_ERROR(-Wpadded)
172 struct vk_shader_bin_header {
173    char mesavkshaderbin[16];
174    VkDriverId driver_id;
175    uint8_t uuid[VK_UUID_SIZE];
176    uint32_t version;
177    uint64_t size;
178    uint8_t sha1[SHA1_DIGEST_LENGTH];
179    uint32_t _pad;
180 };
181 PRAGMA_DIAGNOSTIC_POP
182 static_assert(sizeof(struct vk_shader_bin_header) == 72,
183               "This struct has no holes");
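
/* Layout, assuming 4-byte enums (which the static_assert above effectively
 * checks): 16 bytes of magic + 4-byte driver_id + 16-byte uuid + 4-byte
 * version place the uint64_t size at offset 40, already 8-byte aligned,
 * followed by 20 bytes of SHA-1 and 4 bytes of explicit padding, for 72
 * bytes total.  A serialized shader binary is this header followed
 * immediately by the driver's payload; header.size is the total size
 * including the header and header.sha1 covers the whole binary, computed
 * with the sha1 field zeroed.
 */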
184 
185 static void
186 vk_shader_bin_header_init(struct vk_shader_bin_header *header,
187                           struct vk_physical_device *device)
188 {
189    *header = (struct vk_shader_bin_header) {
190       .mesavkshaderbin = "MesaVkShaderBin",
191       .driver_id = device->properties.driverID,
192    };
193 
194    memcpy(header->uuid, device->properties.shaderBinaryUUID, VK_UUID_SIZE);
195    header->version = device->properties.shaderBinaryVersion;
196 }
197 
198 static VkResult
199 vk_shader_serialize(struct vk_device *device,
200                     struct vk_shader *shader,
201                     struct blob *blob)
202 {
203    struct vk_shader_bin_header header;
204    vk_shader_bin_header_init(&header, device->physical);
205 
206    ASSERTED intptr_t header_offset = blob_reserve_bytes(blob, sizeof(header));
207    assert(header_offset == 0);
208 
209    bool success = shader->ops->serialize(device, shader, blob);
210    if (!success || blob->out_of_memory)
211       return VK_INCOMPLETE;
212 
213    /* Finalize and write the header */
214    header.size = blob->size;
215    if (blob->data != NULL) {
216       assert(sizeof(header) <= blob->size);
217 
218       struct mesa_sha1 sha1_ctx;
219       _mesa_sha1_init(&sha1_ctx);
220 
221       /* Hash the header with a zero SHA1 */
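      /* (header.sha1 is still all zeroes here: vk_shader_bin_header_init()
       * leaves it zero-initialized, which is what allows
       * vk_shader_deserialize() to recompute the same hash after explicitly
       * zeroing the sha1 field.)
       */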
222       _mesa_sha1_update(&sha1_ctx, &header, sizeof(header));
223 
224       /* Hash the serialized data */
225       _mesa_sha1_update(&sha1_ctx, blob->data + sizeof(header),
226                         blob->size - sizeof(header));
227 
228       _mesa_sha1_final(&sha1_ctx, header.sha1);
229 
230       blob_overwrite_bytes(blob, header_offset, &header, sizeof(header));
231    }
232 
233    return VK_SUCCESS;
234 }
235 
236 static VkResult
237 vk_shader_deserialize(struct vk_device *device,
238                       size_t data_size, const void *data,
239                       const VkAllocationCallbacks *pAllocator,
240                       struct vk_shader **shader_out)
241 {
242    const struct vk_device_shader_ops *ops = device->shader_ops;
243 
244    struct blob_reader blob;
245    blob_reader_init(&blob, data, data_size);
246 
247    struct vk_shader_bin_header header, ref_header;
248    blob_copy_bytes(&blob, &header, sizeof(header));
249    if (blob.overrun)
250       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
251 
252    vk_shader_bin_header_init(&ref_header, device->physical);
253 
254    if (memcmp(header.mesavkshaderbin, ref_header.mesavkshaderbin,
255               sizeof(header.mesavkshaderbin)))
256       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
257 
258    if (header.driver_id != ref_header.driver_id)
259       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
260 
261    if (memcmp(header.uuid, ref_header.uuid, sizeof(header.uuid)))
262       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
263 
264    /* From the Vulkan 1.3.276 spec:
265     *
266     *    "Guaranteed compatibility of shader binaries is expressed through a
267     *    combination of the shaderBinaryUUID and shaderBinaryVersion members
268     *    of the VkPhysicalDeviceShaderObjectPropertiesEXT structure queried
269     *    from a physical device. Binary shaders retrieved from a physical
270     *    device with a certain shaderBinaryUUID are guaranteed to be
271     *    compatible with all other physical devices reporting the same
272     *    shaderBinaryUUID and the same or higher shaderBinaryVersion."
273     *
274     * We handle the version check here on behalf of the driver and then pass
275     * the version into the driver's deserialize callback.
276     *
277     * If a driver doesn't want to mess with versions, it can always make the
278     * UUID a hash and report version 0, which makes this check effectively
279     * a no-op.
280     */
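   /* For example, a device reporting shaderBinaryVersion 3 accepts binaries
    * serialized with versions 0 through 3 and rejects anything newer.
    */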
281    if (header.version > ref_header.version)
282       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
283 
284    /* Reject shader binaries that are the wrong size. */
285    if (header.size != data_size)
286       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
287 
288    assert(blob.current == (uint8_t *)data + sizeof(header));
289    blob.end = (uint8_t *)data + data_size;
290 
291    struct mesa_sha1 sha1_ctx;
292    _mesa_sha1_init(&sha1_ctx);
293 
294    /* Hash the header with a zero SHA1 */
295    struct vk_shader_bin_header sha1_header = header;
296    memset(sha1_header.sha1, 0, sizeof(sha1_header.sha1));
297    _mesa_sha1_update(&sha1_ctx, &sha1_header, sizeof(sha1_header));
298 
299    /* Hash the serialized data */
300    _mesa_sha1_update(&sha1_ctx, (uint8_t *)data + sizeof(header),
301                      data_size - sizeof(header));
302 
303    _mesa_sha1_final(&sha1_ctx, ref_header.sha1);
304    if (memcmp(header.sha1, ref_header.sha1, sizeof(header.sha1)))
305       return vk_error(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT);
306 
307    /* We've now verified that the header matches and that the data has the
308     * right SHA1 hash so it's safe to call into the driver.
309     */
310    return ops->deserialize(device, &blob, header.version,
311                            pAllocator, shader_out);
312 }
313 
314 VKAPI_ATTR VkResult VKAPI_CALL
315 vk_common_GetShaderBinaryDataEXT(VkDevice _device,
316                                  VkShaderEXT _shader,
317                                  size_t *pDataSize,
318                                  void *pData)
319 {
320    VK_FROM_HANDLE(vk_device, device, _device);
321    VK_FROM_HANDLE(vk_shader, shader, _shader);
322    VkResult result;
323 
324    /* From the Vulkan 1.3.275 spec:
325     *
326     *    "If pData is NULL, then the size of the binary shader code of the
327     *    shader object, in bytes, is returned in pDataSize. Otherwise,
328     *    pDataSize must point to a variable set by the user to the size of the
329     *    buffer, in bytes, pointed to by pData, and on return the variable is
330     *    overwritten with the amount of data actually written to pData. If
331     *    pDataSize is less than the size of the binary shader code, nothing is
332     *    written to pData, and VK_INCOMPLETE will be returned instead of
333     *    VK_SUCCESS."
334     *
335     * This is annoying.  Unlike basically every other Vulkan data return
336     * method, we're not allowed to overwrite the client-provided memory region
337     * on VK_INCOMPLETE.  This means we either need to query the blob size
338     * up-front by serializing twice or we need to serialize into temporary
339     * memory and memcpy into the client-provided region.  We choose the first
340     * approach.
341     *
342     * In the common case, this means that vk_shader_ops::serialize will get
343     * called 3 times: Once for the client to get the size, once for us to
344     * validate the client's size, and once to actually write the data.  It's a
345     * bit heavy-weight but this shouldn't be in a hot path and this is better
346     * for memory efficiency.  Also, the vk_shader_ops::serialize should be
347     * pretty fast on a null blob.
348     */
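   /* From the client's side this is the usual Vulkan two-call pattern
    * (illustrative only):
    *
    *    size_t size = 0;
    *    vkGetShaderBinaryDataEXT(device, shader, &size, NULL);
    *    void *data = malloc(size);
    *    vkGetShaderBinaryDataEXT(device, shader, &size, data);
    */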
349    struct blob blob;
350    blob_init_fixed(&blob, NULL, SIZE_MAX);
351    result = vk_shader_serialize(device, shader, &blob);
352    assert(result == VK_SUCCESS);
353 
354    if (result != VK_SUCCESS) {
355       *pDataSize = 0;
356       return result;
357    } else if (pData == NULL) {
358       *pDataSize = blob.size;
359       return VK_SUCCESS;
360    } else if (blob.size > *pDataSize) {
361       /* No data written */
362       *pDataSize = 0;
363       return VK_INCOMPLETE;
364    }
365 
366    blob_init_fixed(&blob, pData, *pDataSize);
367    result = vk_shader_serialize(device, shader, &blob);
368    assert(result == VK_SUCCESS);
369 
370    *pDataSize = blob.size;
371 
372    return result;
373 }
374 
375 /* The only place where we have "real" linking is graphics shaders and there
376  * is a limit as to how many of them can be linked together at one time.
377  */
378 #define VK_MAX_LINKED_SHADER_STAGES MESA_VK_MAX_GRAPHICS_PIPELINE_STAGES
379 
380 VKAPI_ATTR VkResult VKAPI_CALL
381 vk_common_CreateShadersEXT(VkDevice _device,
382                            uint32_t createInfoCount,
383                            const VkShaderCreateInfoEXT *pCreateInfos,
384                            const VkAllocationCallbacks *pAllocator,
385                            VkShaderEXT *pShaders)
386 {
387    VK_FROM_HANDLE(vk_device, device, _device);
388    const struct vk_device_shader_ops *ops = device->shader_ops;
389    VkResult first_fail_or_success = VK_SUCCESS;
390 
391    struct vk_pipeline_robustness_state rs = {
392       .storage_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
393       .uniform_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
394       .vertex_inputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT,
395       .images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT,
396       /* From the Vulkan 1.3.292 spec:
397        *
398        *    "This extension [VK_EXT_robustness2] also adds support for “null
399        *    descriptors”, where VK_NULL_HANDLE can be used instead of a valid
400        *    handle. Accesses to null descriptors have well-defined behavior,
401        *    and do not rely on robustness."
402        *
403        * For now, default these to true.
404        */
405       .null_uniform_buffer_descriptor = true,
406       .null_storage_buffer_descriptor = true,
407    };
408 
409    /* From the Vulkan 1.3.274 spec:
410     *
411     *    "When this function returns, whether or not it succeeds, it is
412     *    guaranteed that every element of pShaders will have been overwritten
413     *    by either VK_NULL_HANDLE or a valid VkShaderEXT handle."
414     *
415     * Zeroing up-front makes the error path easier.
416     */
417    memset(pShaders, 0, createInfoCount * sizeof(*pShaders));
418 
419    bool has_linked_spirv = false;
420    for (uint32_t i = 0; i < createInfoCount; i++) {
421       if (pCreateInfos[i].codeType == VK_SHADER_CODE_TYPE_SPIRV_EXT &&
422           (pCreateInfos[i].flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT))
423          has_linked_spirv = true;
424    }
425 
426    uint32_t linked_count = 0;
427    struct stage_idx linked[VK_MAX_LINKED_SHADER_STAGES];
428 
429    for (uint32_t i = 0; i < createInfoCount; i++) {
430       const VkShaderCreateInfoEXT *vk_info = &pCreateInfos[i];
431       VkResult result = VK_SUCCESS;
432 
433       switch (vk_info->codeType) {
434       case VK_SHADER_CODE_TYPE_BINARY_EXT: {
435          /* This isn't required by Vulkan but we're allowed to fail binary
436           * import for basically any reason.  This seems like a pretty good
437           * reason.
438           */
439          if (has_linked_spirv &&
440              (vk_info->flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT)) {
441             result = vk_errorf(device, VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT,
442                                "Cannot mix linked binary and SPIR-V");
443             break;
444          }
445 
446          struct vk_shader *shader;
447          result = vk_shader_deserialize(device, vk_info->codeSize,
448                                         vk_info->pCode, pAllocator,
449                                         &shader);
450          if (result != VK_SUCCESS)
451             break;
452 
453          pShaders[i] = vk_shader_to_handle(shader);
454          break;
455       }
456 
457       case VK_SHADER_CODE_TYPE_SPIRV_EXT: {
458          if (vk_info->flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT) {
459             /* Stash it and compile later */
460             assert(linked_count < ARRAY_SIZE(linked));
461             linked[linked_count++] = (struct stage_idx) {
462                .stage = vk_to_mesa_shader_stage(vk_info->stage),
463                .idx = i,
464             };
465          } else {
466             nir_shader *nir = vk_shader_to_nir(device, vk_info, &rs);
467             if (nir == NULL) {
468                result = vk_errorf(device, VK_ERROR_UNKNOWN,
469                                   "Failed to compile shader to NIR");
470                break;
471             }
472 
473             struct vk_shader_compile_info info;
474             struct set_layouts set_layouts;
475             vk_shader_compile_info_init(&info, &set_layouts,
476                                         vk_info, &rs, nir);
477 
478             struct vk_shader *shader;
479             result = ops->compile(device, 1, &info, NULL /* state */,
480                                   pAllocator, &shader);
481             if (result != VK_SUCCESS)
482                break;
483 
484             pShaders[i] = vk_shader_to_handle(shader);
485          }
486          break;
487       }
488 
489       default:
490          unreachable("Unknown shader code type");
491       }
492 
493       if (first_fail_or_success == VK_SUCCESS)
494          first_fail_or_success = result;
495    }
496 
497    if (linked_count > 0) {
498       struct set_layouts set_layouts[VK_MAX_LINKED_SHADER_STAGES];
499       struct vk_shader_compile_info infos[VK_MAX_LINKED_SHADER_STAGES];
500       VkResult result = VK_SUCCESS;
501 
502       /* Sort so we guarantee the driver always gets them in-order */
503       qsort(linked, linked_count, sizeof(*linked), cmp_stage_idx);
504 
505       /* Memset for easy error handling */
506       memset(infos, 0, sizeof(infos));
507 
508       for (uint32_t l = 0; l < linked_count; l++) {
509          const VkShaderCreateInfoEXT *vk_info = &pCreateInfos[linked[l].idx];
510 
511          nir_shader *nir = vk_shader_to_nir(device, vk_info, &rs);
512          if (nir == NULL) {
513             result = vk_errorf(device, VK_ERROR_UNKNOWN,
514                                "Failed to compile shader to NIR");
515             break;
516          }
517 
518          vk_shader_compile_info_init(&infos[l], &set_layouts[l],
519                                      vk_info, &rs, nir);
520       }
521 
522       if (result == VK_SUCCESS) {
523          struct vk_shader *shaders[VK_MAX_LINKED_SHADER_STAGES];
524 
525          result = ops->compile(device, linked_count, infos, NULL /* state */,
526                                pAllocator, shaders);
527          if (result == VK_SUCCESS) {
528             for (uint32_t l = 0; l < linked_count; l++)
529                pShaders[linked[l].idx] = vk_shader_to_handle(shaders[l]);
530          }
531       } else {
532          for (uint32_t l = 0; l < linked_count; l++) {
533             if (infos[l].nir != NULL)
534                ralloc_free(infos[l].nir);
535          }
536       }
537 
538       if (first_fail_or_success == VK_SUCCESS)
539          first_fail_or_success = result;
540    }
541 
542    return first_fail_or_success;
543 }
544 
545 VKAPI_ATTR void VKAPI_CALL
546 vk_common_DestroyShaderEXT(VkDevice _device,
547                            VkShaderEXT _shader,
548                            const VkAllocationCallbacks *pAllocator)
549 {
550    VK_FROM_HANDLE(vk_device, device, _device);
551    VK_FROM_HANDLE(vk_shader, shader, _shader);
552 
553    if (shader == NULL)
554       return;
555 
556    vk_shader_destroy(device, shader, pAllocator);
557 }
558 
559 VKAPI_ATTR void VKAPI_CALL
560 vk_common_CmdBindShadersEXT(VkCommandBuffer commandBuffer,
561                             uint32_t stageCount,
562                             const VkShaderStageFlagBits *pStages,
563                             const VkShaderEXT *pShaders)
564 {
565    VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
566    struct vk_device *device = cmd_buffer->base.device;
567    const struct vk_device_shader_ops *ops = device->shader_ops;
568 
569    STACK_ARRAY(gl_shader_stage, stages, stageCount);
570    STACK_ARRAY(struct vk_shader *, shaders, stageCount);
571 
572    VkShaderStageFlags vk_stages = 0;
573    for (uint32_t i = 0; i < stageCount; i++) {
574       vk_stages |= pStages[i];
575       stages[i] = vk_to_mesa_shader_stage(pStages[i]);
576       shaders[i] = pShaders != NULL ? vk_shader_from_handle(pShaders[i]) : NULL;
577    }
578 
579    vk_cmd_unbind_pipelines_for_stages(cmd_buffer, vk_stages);
580    if (vk_stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
581       vk_cmd_set_rp_attachments(cmd_buffer, ~0);
582 
583    ops->cmd_bind_shaders(cmd_buffer, stageCount, stages, shaders);
584 }
585