/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */
10
11 #ifndef VN_INSTANCE_H
12 #define VN_INSTANCE_H
13
14 #include "vn_common.h"
15
16 #include "vn_renderer_util.h"
17
18 /* require and request at least Vulkan 1.1 at both instance and device levels
19 */
20 #define VN_MIN_RENDERER_VERSION VK_API_VERSION_1_1
21
22 /* max advertised version at both instance and device levels */
23 #if defined(ANDROID_STRICT) && ANDROID_API_LEVEL < 33
24 #define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
25 #else
26 #define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 3, VK_HEADER_VERSION)
27 #endif
28
/* Per-VkInstance Venus driver state: the connection to the renderer plus the
 * shared rings and shmem pools used to carry Venus protocol commands.
 */
struct vn_instance {
   struct vn_instance_base base;

   /* driconf option caches: parsed options vs. all available options */
   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;
   /* driconf toggle for advertising multi-plane WSI format modifiers */
   bool enable_wsi_multi_plane_modifiers;

   struct vn_renderer *renderer;

   /* for VN_CS_ENCODER_STORAGE_SHMEM_POOL */
   struct vn_renderer_shmem_pool cs_shmem_pool;

   /* shmem pool backing renderer reply buffers */
   struct vn_renderer_shmem_pool reply_shmem_pool;

   /* protects ring_idx_used_mask below */
   mtx_t ring_idx_mutex;
   /* one bit per renderer timeline (ring) index currently in use; bit 0 is
    * the dedicated CPU ring and is expected to stay set (see
    * vn_instance_acquire_ring_idx)
    */
   uint64_t ring_idx_used_mask;

   struct {
      /* the instance's main ring */
      struct vn_ring *ring;
      /* NOTE(review): presumably per-thread rings tracked for cleanup —
       * confirm against the ring implementation */
      struct list_head tls_rings;

      struct vn_watchdog watchdog;
   } ring;

   /* Between the driver and the app, VN_MAX_API_VERSION is what we advertise
    * and base.base.app_info.api_version is what the app requests.
    *
    * Between the driver and the renderer, renderer_api_version is the api
    * version we request internally, which can be higher than
    * base.base.app_info.api_version. renderer_version is the instance
    * version we can use internally.
    */
   uint32_t renderer_api_version;
   uint32_t renderer_version;

   /* true when the app's engine name identifies zink */
   bool engine_is_zink;

   /* lazily-initialized physical device enumeration cache */
   struct {
      /* protects lazy initialization of this struct */
      mtx_t mutex;
      bool initialized;

      struct vn_physical_device *devices;
      uint32_t device_count;
      VkPhysicalDeviceGroupProperties *groups;
      uint32_t group_count;
   } physical_device;
};
VK_DEFINE_HANDLE_CASTS(vn_instance,
                       base.base.base,
                       VkInstance,
                       VK_OBJECT_TYPE_INSTANCE)
80
81 static inline struct vn_renderer_shmem *
vn_instance_cs_shmem_alloc(struct vn_instance * instance,size_t size,size_t * out_offset)82 vn_instance_cs_shmem_alloc(struct vn_instance *instance,
83 size_t size,
84 size_t *out_offset)
85 {
86 return vn_renderer_shmem_pool_alloc(
87 instance->renderer, &instance->cs_shmem_pool, size, out_offset);
88 }
89
90 static inline struct vn_renderer_shmem *
vn_instance_reply_shmem_alloc(struct vn_instance * instance,size_t size,size_t * out_offset)91 vn_instance_reply_shmem_alloc(struct vn_instance *instance,
92 size_t size,
93 size_t *out_offset)
94 {
95 return vn_renderer_shmem_pool_alloc(
96 instance->renderer, &instance->reply_shmem_pool, size, out_offset);
97 }
98
/* Acquire a vacant renderer ring (timeline) index for exclusive use.
 *
 * Returns the lowest vacant index, or -1 when all indices supported by the
 * renderer are in use.  Index 0 is the dedicated CPU ring: its bit is
 * expected to be pre-set in ring_idx_used_mask so it is never handed out,
 * which the assert below double-checks.
 *
 * Pair with vn_instance_release_ring_idx().
 */
static inline int
vn_instance_acquire_ring_idx(struct vn_instance *instance)
{
   mtx_lock(&instance->ring_idx_mutex);
   /* index of the lowest clear bit; -1 when the mask is fully set */
   int ring_idx = ffsll(~instance->ring_idx_used_mask) - 1;
   if (ring_idx >= instance->renderer->info.max_timeline_count)
      ring_idx = -1;
   /* only mark valid, non-CPU indices as used */
   if (ring_idx > 0)
      instance->ring_idx_used_mask |= (1ULL << (uint32_t)ring_idx);
   mtx_unlock(&instance->ring_idx_mutex);

   assert(ring_idx); /* never acquire the dedicated CPU ring */

   /* returns -1 when no vacant rings */
   return ring_idx;
}
115
116 static inline void
vn_instance_release_ring_idx(struct vn_instance * instance,uint32_t ring_idx)117 vn_instance_release_ring_idx(struct vn_instance *instance, uint32_t ring_idx)
118 {
119 assert(ring_idx > 0);
120
121 mtx_lock(&instance->ring_idx_mutex);
122 assert(instance->ring_idx_used_mask & (1ULL << ring_idx));
123 instance->ring_idx_used_mask &= ~(1ULL << ring_idx);
124 mtx_unlock(&instance->ring_idx_mutex);
125 }
126
127 #endif /* VN_INSTANCE_H */
128