/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */
5 
#ifndef VKR_QUEUE_H
#define VKR_QUEUE_H

#include "vkr_common.h"
10 
/* One submitted fence awaiting retirement: the VkFence handle plus the
 * virgl-level metadata needed to report it back to the client.
 */
struct vkr_queue_sync {
   VkFence fence;
   /* NOTE(review): presumably set when the device was lost so the sync can
    * be retired without a usable VkFence — confirm against vkr_queue.c */
   bool device_lost;

   uint32_t flags;
   /* ring index to signal; likely only meaningful with multiple timelines,
    * mirroring vkr_queue::ring_idx — TODO confirm */
   uint32_t ring_idx;
   uint64_t fence_id;

   /* list link; syncs live on vkr_queue::pending_syncs / signaled_syncs */
   struct list_head head;
};
21 
/* Tracks one VkQueue of a vkr_device, including the per-queue fence
 * bookkeeping and the optional sync thread used to retire fences.
 */
struct vkr_queue {
   struct vkr_object base;

   struct vkr_context *context;
   struct vkr_device *device;

   /* creation parameters identifying this queue within the device */
   VkDeviceQueueCreateFlags flags;
   uint32_t family;
   uint32_t index;

   /* only used when client driver uses multiple timelines */
   uint32_t ring_idx;

   /* Submitted fences are added to pending_syncs first.  How submitted fences
    * are retired depends on VKR_RENDERER_THREAD_SYNC and
    * VKR_RENDERER_ASYNC_FENCE_CB.
    *
    * When VKR_RENDERER_THREAD_SYNC is not set, the main thread calls
    * vkGetFenceStatus and retires signaled fences in pending_syncs in order.
    *
    * When VKR_RENDERER_THREAD_SYNC is set but VKR_RENDERER_ASYNC_FENCE_CB is
    * not set, the sync thread calls vkWaitForFences and moves signaled fences
    * from pending_syncs to signaled_syncs in order.  The main thread simply
    * retires all fences in signaled_syncs.
    *
    * When VKR_RENDERER_THREAD_SYNC and VKR_RENDERER_ASYNC_FENCE_CB are both
    * set, the sync thread calls vkWaitForFences and retires signaled fences
    * in pending_syncs in order.
    */
   int eventfd;
   thrd_t thread;
   mtx_t mutex;
   cnd_t cond;
   /* when true, the sync thread should exit (checked under mutex) —
    * NOTE(review): inferred from the name; confirm in vkr_queue.c */
   bool join;
   struct list_head pending_syncs;
   struct list_head signaled_syncs;

   /* list link; presumably onto a device-level list of busy queues —
    * TODO confirm against vkr_device */
   struct list_head busy_head;
};
VKR_DEFINE_OBJECT_CAST(queue, VK_OBJECT_TYPE_QUEUE, VkQueue)
62 
/* Wrapper object for a client-created VkFence; no extra state beyond base. */
struct vkr_fence {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(fence, VK_OBJECT_TYPE_FENCE, VkFence)
67 
/* Wrapper object for a client-created VkSemaphore; no extra state. */
struct vkr_semaphore {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(semaphore, VK_OBJECT_TYPE_SEMAPHORE, VkSemaphore)
72 
/* Wrapper object for a client-created VkEvent; no extra state. */
struct vkr_event {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(event, VK_OBJECT_TYPE_EVENT, VkEvent)
77 
/* Register the VkQueue command handlers on ctx's dispatch table. */
void
vkr_context_init_queue_dispatch(struct vkr_context *ctx);

/* Register the VkFence command handlers on ctx's dispatch table. */
void
vkr_context_init_fence_dispatch(struct vkr_context *ctx);

/* Register the VkSemaphore command handlers on ctx's dispatch table. */
void
vkr_context_init_semaphore_dispatch(struct vkr_context *ctx);

/* Register the VkEvent command handlers on ctx's dispatch table. */
void
vkr_context_init_event_dispatch(struct vkr_context *ctx);

/* Allocate a vkr_queue_sync for dev carrying the given fence metadata.
 * Returns NULL on failure; the sync is released with
 * vkr_device_free_queue_sync.
 */
struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
                            uint32_t fence_flags,
                            uint32_t ring_idx,
                            uint64_t fence_id);

/* Release a sync obtained from vkr_device_alloc_queue_sync. */
void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync);

/* Collect the queue's signaled syncs onto retired_syncs for the caller to
 * retire, and set *queue_empty to whether any syncs remain on the queue —
 * see the retirement scheme documented on struct vkr_queue.
 */
void
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
                             struct list_head *retired_syncs,
                             bool *queue_empty);

/* Create a vkr_queue wrapping the driver VkQueue identified by
 * (flags, family, index) on dev.  Returns NULL on failure.
 */
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle);

/* Destroy a queue created by vkr_queue_create, including any sync thread
 * and outstanding syncs it owns.
 */
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue);
114 
#endif /* VKR_QUEUE_H */