/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;

   struct list_head cache_head;
   int64_t cache_timestamp;
};

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter.  The counter can be updated by the
 * CPU or by the GPU.  It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after a timeline VkSemaphore rather than a timeline
 * drm_syncobj.  The main difference is that a drm_syncobj can have an
 * unsignaled value of 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};

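/*
 * Illustrative sketch (not part of the interface): the counter semantics map
 * onto the vn_renderer_sync_* helpers declared later in this header.
 * "renderer" is assumed to exist and error handling is omitted.
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0, 0, &sync);
 *
 *    // CPU-side signal: write a value larger than the current one.
 *    vn_renderer_sync_write(renderer, sync, 1);
 *
 *    // CPU-side poll: read the current counter value.
 *    uint64_t val;
 *    vn_renderer_sync_read(renderer, sync, &val);
 *
 *    vn_renderer_sync_destroy(renderer, sync);
 */
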
struct vn_renderer_info {
   struct {
      VkPhysicalDeviceDrmPropertiesEXT props;
   } drm;

   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      VkPhysicalDevicePCIBusInfoPropertiesEXT props;
   } pci;

   bool has_dma_buf_import;
   bool has_external_sync;
   bool has_implicit_fencing;
   bool has_guest_vram;

   uint32_t max_timeline_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;

   /* combined mask for vk_extension_mask1, 2,..., N */
   uint32_t vk_extension_mask[32];
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the timeline identified by ring_idx. A timeline is
    * typically associated with a physical VkQueue and bound to the ring_idx
    * during VkQueue creation. After execution completes on the VkQueue, the
    * timeline sync point is signaled.
    *
    * ring_idx 0 is reserved for the context-specific CPU timeline. Sync
    * points on the CPU timeline are signaled immediately after command
    * processing by the renderer.
    */
   uint32_t ring_idx;

   /* syncs to update when the timeline is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

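/*
 * Illustrative sketch (not part of the interface): a batch that submits an
 * encoded command stream to the timeline behind ring_idx 1 and asks for
 * "sync" to be updated to value 10 when that timeline sync point signals.
 * cs_data, cs_size, sync, and the ring_idx binding are assumed to have been
 * set up elsewhere.
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cs_data,
 *       .cs_size = cs_size,
 *       .ring_idx = 1,
 *       .syncs = (struct vn_renderer_sync *const[]){ sync },
 *       .sync_values = (const uint64_t[]){ 10 },
 *       .sync_count = 1,
 *    };
 */
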
struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them.  We don't do it yet
    * because each vn_command_buffer owns a bo.  We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};

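/*
 * Illustrative sketch (not part of the interface): submitting the batch
 * above while pinning and implicitly fencing one BO.  "bo" and "batch" are
 * assumed to exist; error handling is omitted.
 *
 *    const struct vn_renderer_submit submit = {
 *       .bos = (struct vn_renderer_bo *const[]){ bo },
 *       .bo_count = 1,
 *       .batches = &batch,
 *       .batch_count = 1,
 *    };
 *    VkResult result = vn_renderer_submit(renderer, &submit);
 */
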
struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT.  On failure, returns
    * VK_ERROR_DEVICE_LOST or an out-of-device/host-memory error.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

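/*
 * Illustrative sketch (not part of the interface): waiting for the sync
 * point requested in the submit above, retrying on VK_TIMEOUT.  The timeout
 * value is an arbitrary example.
 *
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = false,
 *       .timeout = 1000ull * 1000 * 1000,
 *       .syncs = (struct vn_renderer_sync *const[]){ sync },
 *       .sync_values = (const uint64_t[]){ 10 },
 *       .sync_count = 1,
 *    };
 *
 *    VkResult result;
 *    do {
 *       result = vn_renderer_wait(renderer, &wait);
 *    } while (result == VK_TIMEOUT);
 */
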
struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

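/*
 * Illustrative sketch (not part of the interface): mapping a BO and flushing
 * a CPU write, via the vn_renderer_bo_* helpers declared later in this
 * header.  map is not thread-safe, so the caller is assumed to serialize
 * access; whether a flush is needed depends on the memory's coherency.
 * "data" and "data_size" are assumed to exist.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);
 *    if (ptr) {
 *       memcpy(ptr, data, data_size);
 *       vn_renderer_bo_flush(renderer, bo, 0, data_size);
 *    }
 */
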
enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_info info;
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}

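/*
 * Illustrative sketch (not part of the interface): typical renderer
 * lifecycle.  vn_renderer_create falls back to virtgpu when the vtest path
 * is not enabled or fails.  "instance" and "alloc" are assumed to exist.
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *
 *    // ... use renderer->info and the wrappers below ...
 *
 *    vn_renderer_destroy(renderer, alloc);
 */
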
static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   VN_TRACE_FUNC();
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

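/*
 * Illustrative sketch (not part of the interface): creating a shmem, filling
 * it through its mapping, and dropping the reference.  "data" and
 * "data_size" are assumed to exist; error handling is omitted.
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, data_size);
 *    if (shmem) {
 *       memcpy(shmem->mmap_ptr, data, data_size);
 *       vn_renderer_shmem_unref(renderer, shmem);
 *    }
 */
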
static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize end)
{
   renderer->bo_ops.flush(renderer, bo, offset, end);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

#endif /* VN_RENDERER_H */