/* This file is generated by venus-protocol.  See vn_protocol_driver.h. */

/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H
#define VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H

#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"

/* struct VkDescriptorPoolSize */

static inline size_t
vn_sizeof_VkDescriptorPoolSize(const VkDescriptorPoolSize *val)
{
    size_t size = 0;
    size += vn_sizeof_VkDescriptorType(&val->type);
    size += vn_sizeof_uint32_t(&val->descriptorCount);
    return size;
}

static inline void
vn_encode_VkDescriptorPoolSize(struct vn_cs_encoder *enc, const VkDescriptorPoolSize *val)
{
    vn_encode_VkDescriptorType(enc, &val->type);
    vn_encode_uint32_t(enc, &val->descriptorCount);
}

/* struct VkDescriptorPoolInlineUniformBlockCreateInfo chain */

static inline size_t
vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext(const void *val)
{
    /* no known/supported struct */
    return vn_sizeof_simple_pointer(NULL);
}

static inline size_t
vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo_self(const VkDescriptorPoolInlineUniformBlockCreateInfo *val)
{
    size_t size = 0;
    /* skip val->{sType,pNext} */
    size += vn_sizeof_uint32_t(&val->maxInlineUniformBlockBindings);
    return size;
}

static inline size_t
vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo(const VkDescriptorPoolInlineUniformBlockCreateInfo *val)
{
    size_t size = 0;

    size += vn_sizeof_VkStructureType(&val->sType);
    size += vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext(val->pNext);
    size += vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo_self(val);

    return size;
}

static inline void
vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext(struct vn_cs_encoder *enc, const void *val)
{
    /* no known/supported struct */
    vn_encode_simple_pointer(enc, NULL);
}

static inline void
vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo_self(struct vn_cs_encoder *enc, const VkDescriptorPoolInlineUniformBlockCreateInfo *val)
{
    /* skip val->{sType,pNext} */
    vn_encode_uint32_t(enc, &val->maxInlineUniformBlockBindings);
}

static inline void
vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo(struct vn_cs_encoder *enc, const VkDescriptorPoolInlineUniformBlockCreateInfo *val)
{
    assert(val->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO);
    vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO });
    vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext(enc, val->pNext);
    vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo_self(enc, val);
}

/* struct VkDescriptorPoolCreateInfo chain */

static inline size_t
vn_sizeof_VkDescriptorPoolCreateInfo_pnext(const void *val)
{
    const VkBaseInStructure *pnext = val;
    size_t size = 0;

    while (pnext) {
        switch ((int32_t)pnext->sType) {
        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO:
            if (!vn_cs_renderer_protocol_has_extension(139 /* VK_EXT_inline_uniform_block */))
                break;
            size += vn_sizeof_simple_pointer(pnext);
            size += vn_sizeof_VkStructureType(&pnext->sType);
            size += vn_sizeof_VkDescriptorPoolCreateInfo_pnext(pnext->pNext);
            size += vn_sizeof_VkDescriptorPoolInlineUniformBlockCreateInfo_self((const VkDescriptorPoolInlineUniformBlockCreateInfo *)pnext);
            return size;
        case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
            if (!vn_cs_renderer_protocol_has_extension(352 /* VK_VALVE_mutable_descriptor_type */) && !vn_cs_renderer_protocol_has_extension(495 /* VK_EXT_mutable_descriptor_type */))
                break;
            size += vn_sizeof_simple_pointer(pnext);
            size += vn_sizeof_VkStructureType(&pnext->sType);
            size += vn_sizeof_VkDescriptorPoolCreateInfo_pnext(pnext->pNext);
            size += vn_sizeof_VkMutableDescriptorTypeCreateInfoEXT_self((const VkMutableDescriptorTypeCreateInfoEXT *)pnext);
            return size;
        default:
            /* ignore unknown/unsupported struct */
            break;
        }
        pnext = pnext->pNext;
    }

    return vn_sizeof_simple_pointer(NULL);
}

static inline size_t
vn_sizeof_VkDescriptorPoolCreateInfo_self(const VkDescriptorPoolCreateInfo *val)
{
    size_t size = 0;
    /* skip val->{sType,pNext} */
    size += vn_sizeof_VkFlags(&val->flags);
    size += vn_sizeof_uint32_t(&val->maxSets);
    size += vn_sizeof_uint32_t(&val->poolSizeCount);
    if (val->pPoolSizes) {
        size += vn_sizeof_array_size(val->poolSizeCount);
        for (uint32_t i = 0; i < val->poolSizeCount; i++)
            size += vn_sizeof_VkDescriptorPoolSize(&val->pPoolSizes[i]);
    } else {
        size += vn_sizeof_array_size(0);
    }
    return size;
}

static inline size_t
vn_sizeof_VkDescriptorPoolCreateInfo(const VkDescriptorPoolCreateInfo *val)
{
    size_t size = 0;

    size += vn_sizeof_VkStructureType(&val->sType);
    size += vn_sizeof_VkDescriptorPoolCreateInfo_pnext(val->pNext);
    size += vn_sizeof_VkDescriptorPoolCreateInfo_self(val);

    return size;
}

static inline void
vn_encode_VkDescriptorPoolCreateInfo_pnext(struct vn_cs_encoder *enc, const void *val)
{
    const VkBaseInStructure *pnext = val;

    while (pnext) {
        switch ((int32_t)pnext->sType) {
        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO:
            if (!vn_cs_renderer_protocol_has_extension(139 /* VK_EXT_inline_uniform_block */))
                break;
            vn_encode_simple_pointer(enc, pnext);
            vn_encode_VkStructureType(enc, &pnext->sType);
            vn_encode_VkDescriptorPoolCreateInfo_pnext(enc, pnext->pNext);
            vn_encode_VkDescriptorPoolInlineUniformBlockCreateInfo_self(enc, (const VkDescriptorPoolInlineUniformBlockCreateInfo *)pnext);
            return;
        case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
            if (!vn_cs_renderer_protocol_has_extension(352 /* VK_VALVE_mutable_descriptor_type */) && !vn_cs_renderer_protocol_has_extension(495 /* VK_EXT_mutable_descriptor_type */))
                break;
            vn_encode_simple_pointer(enc, pnext);
            vn_encode_VkStructureType(enc, &pnext->sType);
            vn_encode_VkDescriptorPoolCreateInfo_pnext(enc, pnext->pNext);
            vn_encode_VkMutableDescriptorTypeCreateInfoEXT_self(enc, (const VkMutableDescriptorTypeCreateInfoEXT *)pnext);
            return;
        default:
            /* ignore unknown/unsupported struct */
            break;
        }
        pnext = pnext->pNext;
    }

    vn_encode_simple_pointer(enc, NULL);
}

static inline void
vn_encode_VkDescriptorPoolCreateInfo_self(struct vn_cs_encoder *enc, const VkDescriptorPoolCreateInfo *val)
{
    /* skip val->{sType,pNext} */
    vn_encode_VkFlags(enc, &val->flags);
    vn_encode_uint32_t(enc, &val->maxSets);
    vn_encode_uint32_t(enc, &val->poolSizeCount);
    if (val->pPoolSizes) {
        vn_encode_array_size(enc, val->poolSizeCount);
        for (uint32_t i = 0; i < val->poolSizeCount; i++)
            vn_encode_VkDescriptorPoolSize(enc, &val->pPoolSizes[i]);
    } else {
        vn_encode_array_size(enc, 0);
    }
}

static inline void
vn_encode_VkDescriptorPoolCreateInfo(struct vn_cs_encoder *enc, const VkDescriptorPoolCreateInfo *val)
{
    assert(val->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO);
    vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO });
    vn_encode_VkDescriptorPoolCreateInfo_pnext(enc, val->pNext);
    vn_encode_VkDescriptorPoolCreateInfo_self(enc, val);
}

static inline size_t vn_sizeof_vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDescriptorPool_EXT;
    const VkFlags cmd_flags = 0;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);

    cmd_size += vn_sizeof_VkDevice(&device);
    cmd_size += vn_sizeof_simple_pointer(pCreateInfo);
    if (pCreateInfo)
        cmd_size += vn_sizeof_VkDescriptorPoolCreateInfo(pCreateInfo);
    cmd_size += vn_sizeof_simple_pointer(pAllocator);
    if (pAllocator)
        assert(false);
    cmd_size += vn_sizeof_simple_pointer(pDescriptorPool);
    if (pDescriptorPool)
        cmd_size += vn_sizeof_VkDescriptorPool(pDescriptorPool);

    return cmd_size;
}

static inline void vn_encode_vkCreateDescriptorPool(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDescriptorPool_EXT;

    vn_encode_VkCommandTypeEXT(enc, &cmd_type);
    vn_encode_VkFlags(enc, &cmd_flags);

    vn_encode_VkDevice(enc, &device);
    if (vn_encode_simple_pointer(enc, pCreateInfo))
        vn_encode_VkDescriptorPoolCreateInfo(enc, pCreateInfo);
    if (vn_encode_simple_pointer(enc, pAllocator))
        assert(false);
    if (vn_encode_simple_pointer(enc, pDescriptorPool))
        vn_encode_VkDescriptorPool(enc, pDescriptorPool);
}

static inline size_t vn_sizeof_vkCreateDescriptorPool_reply(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDescriptorPool_EXT;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);

    VkResult ret;
    cmd_size += vn_sizeof_VkResult(&ret);
    /* skip device */
    /* skip pCreateInfo */
    /* skip pAllocator */
    cmd_size += vn_sizeof_simple_pointer(pDescriptorPool);
    if (pDescriptorPool)
        cmd_size += vn_sizeof_VkDescriptorPool(pDescriptorPool);

    return cmd_size;
}

static inline VkResult vn_decode_vkCreateDescriptorPool_reply(struct vn_cs_decoder *dec, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    VkCommandTypeEXT command_type;
    vn_decode_VkCommandTypeEXT(dec, &command_type);
    assert(command_type == VK_COMMAND_TYPE_vkCreateDescriptorPool_EXT);

    VkResult ret;
    vn_decode_VkResult(dec, &ret);
    /* skip device */
    /* skip pCreateInfo */
    /* skip pAllocator */
    if (vn_decode_simple_pointer(dec)) {
        vn_decode_VkDescriptorPool(dec, pDescriptorPool);
    } else {
        pDescriptorPool = NULL;
    }

    return ret;
}

static inline size_t vn_sizeof_vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDescriptorPool_EXT;
    const VkFlags cmd_flags = 0;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);

    cmd_size += vn_sizeof_VkDevice(&device);
    cmd_size += vn_sizeof_VkDescriptorPool(&descriptorPool);
    cmd_size += vn_sizeof_simple_pointer(pAllocator);
    if (pAllocator)
        assert(false);

    return cmd_size;
}

static inline void vn_encode_vkDestroyDescriptorPool(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDescriptorPool_EXT;

    vn_encode_VkCommandTypeEXT(enc, &cmd_type);
    vn_encode_VkFlags(enc, &cmd_flags);

    vn_encode_VkDevice(enc, &device);
    vn_encode_VkDescriptorPool(enc, &descriptorPool);
    if (vn_encode_simple_pointer(enc, pAllocator))
        assert(false);
}

static inline size_t vn_sizeof_vkDestroyDescriptorPool_reply(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDescriptorPool_EXT;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);

    /* skip device */
    /* skip descriptorPool */
    /* skip pAllocator */

    return cmd_size;
}

static inline void vn_decode_vkDestroyDescriptorPool_reply(struct vn_cs_decoder *dec, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    VkCommandTypeEXT command_type;
    vn_decode_VkCommandTypeEXT(dec, &command_type);
    assert(command_type == VK_COMMAND_TYPE_vkDestroyDescriptorPool_EXT);

    /* skip device */
    /* skip descriptorPool */
    /* skip pAllocator */
}

static inline size_t vn_sizeof_vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkResetDescriptorPool_EXT;
    const VkFlags cmd_flags = 0;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);

    cmd_size += vn_sizeof_VkDevice(&device);
    cmd_size += vn_sizeof_VkDescriptorPool(&descriptorPool);
    cmd_size += vn_sizeof_VkFlags(&flags);

    return cmd_size;
}

static inline void vn_encode_vkResetDescriptorPool(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkResetDescriptorPool_EXT;

    vn_encode_VkCommandTypeEXT(enc, &cmd_type);
    vn_encode_VkFlags(enc, &cmd_flags);

    vn_encode_VkDevice(enc, &device);
    vn_encode_VkDescriptorPool(enc, &descriptorPool);
    vn_encode_VkFlags(enc, &flags);
}

static inline size_t vn_sizeof_vkResetDescriptorPool_reply(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkResetDescriptorPool_EXT;
    size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);

    VkResult ret;
    cmd_size += vn_sizeof_VkResult(&ret);
    /* skip device */
    /* skip descriptorPool */
    /* skip flags */

    return cmd_size;
}

static inline VkResult vn_decode_vkResetDescriptorPool_reply(struct vn_cs_decoder *dec, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    VkCommandTypeEXT command_type;
    vn_decode_VkCommandTypeEXT(dec, &command_type);
    assert(command_type == VK_COMMAND_TYPE_vkResetDescriptorPool_EXT);

    VkResult ret;
    vn_decode_VkResult(dec, &ret);
    /* skip device */
    /* skip descriptorPool */
    /* skip flags */

    return ret;
}

static inline void vn_submit_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    size_t cmd_size = vn_sizeof_vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (cmd_size > sizeof(local_cmd_data)) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateDescriptorPool_reply(device, pCreateInfo, pAllocator, pDescriptorPool) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkCreateDescriptorPool(enc, cmd_flags, device, pCreateInfo, pAllocator, pDescriptorPool);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}

static inline void vn_submit_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    size_t cmd_size = vn_sizeof_vkDestroyDescriptorPool(device, descriptorPool, pAllocator);
    if (cmd_size > sizeof(local_cmd_data)) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyDescriptorPool_reply(device, descriptorPool, pAllocator) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkDestroyDescriptorPool(enc, cmd_flags, device, descriptorPool, pAllocator);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}

static inline void vn_submit_vkResetDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    size_t cmd_size = vn_sizeof_vkResetDescriptorPool(device, descriptorPool, flags);
    if (cmd_size > sizeof(local_cmd_data)) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetDescriptorPool_reply(device, descriptorPool, flags) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkResetDescriptorPool(enc, cmd_flags, device, descriptorPool, flags);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}

static inline VkResult vn_call_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkCreateDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkCreateDescriptorPool_reply(dec, device, pCreateInfo, pAllocator, pDescriptorPool);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

static inline void vn_async_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateDescriptorPool(vn_ring, 0, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
}

static inline void vn_call_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkDestroyDescriptorPool_reply(dec, device, descriptorPool, pAllocator);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

static inline void vn_async_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyDescriptorPool(vn_ring, 0, device, descriptorPool, pAllocator, &submit);
}

static inline VkResult vn_call_vkResetDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkResetDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, flags, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkResetDescriptorPool_reply(dec, device, descriptorPool, flags);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

static inline void vn_async_vkResetDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkResetDescriptorPool(vn_ring, 0, device, descriptorPool, flags, &submit);
}
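
/*
 * A minimal usage sketch (not part of the generated protocol): with the
 * wrappers above, a driver would typically fill a VkDescriptorPoolCreateInfo
 * and go through the blocking vn_call_* path, which submits the command on
 * the ring and decodes the renderer's reply.  The `ring` and `dev` handles
 * and the pool sizes below are hypothetical placeholders.
 *
 *     const VkDescriptorPoolSize pool_size = {
 *         .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *         .descriptorCount = 16,
 *     };
 *     const VkDescriptorPoolCreateInfo info = {
 *         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
 *         .maxSets = 4,
 *         .poolSizeCount = 1,
 *         .pPoolSizes = &pool_size,
 *     };
 *     VkDescriptorPool pool;
 *     VkResult result =
 *         vn_call_vkCreateDescriptorPool(ring, dev, &info, NULL, &pool);
 */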

#endif /* VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H */