/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#ifndef VN_COMMON_H
#define VN_COMMON_H

#include <assert.h>
#include <inttypes.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <vulkan/vulkan.h>

#include "c11/threads.h"
#include "drm-uapi/drm_fourcc.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/compiler.h"
#include "util/detect_os.h"
#include "util/libsync.h"
#include "util/list.h"
#include "util/macros.h"
#include "util/os_time.h"
#include "util/perf/cpu_trace.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_math.h"
#include "util/xmlconfig.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
#include "vk_device.h"
#include "vk_device_memory.h"
#include "vk_image.h"
#include "vk_instance.h"
#include "vk_object.h"
#include "vk_physical_device.h"
#include "vk_queue.h"
#include "vk_util.h"

#include "vn_entrypoints.h"

#define VN_DEFAULT_ALIGN             8
#define VN_WATCHDOG_REPORT_PERIOD_US 3000000

#define VN_DEBUG(category) (unlikely(vn_env.debug & VN_DEBUG_##category))
#define VN_PERF(category)  (unlikely(vn_env.perf & VN_PERF_##category))

#define vn_error(instance, error)                                            \
   (VN_DEBUG(RESULT) ? vn_log_result((instance), (error), __func__) : (error))
#define vn_result(instance, result)                                          \
   ((result) >= VK_SUCCESS ? (result) : vn_error((instance), (result)))
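
/* Illustrative sketch (not from the driver; some_fallible_setup() is a
 * hypothetical helper): vn_error() logs when VN_DEBUG(RESULT) is set and
 * returns the error code, while vn_result() only routes error codes through
 * vn_error():
 *
 *    VkResult result = some_fallible_setup(instance);
 *    if (result != VK_SUCCESS)
 *       return vn_error(instance, result);
 *    ...
 *    return vn_result(instance, result);
 */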

#define VN_TRACE_SCOPE(name) MESA_TRACE_SCOPE(name)
#define VN_TRACE_FUNC()      MESA_TRACE_SCOPE(__func__)

struct vn_instance;
struct vn_physical_device;
struct vn_device;
struct vn_queue;
struct vn_fence;
struct vn_semaphore;
struct vn_device_memory;
struct vn_buffer;
struct vn_buffer_view;
struct vn_image;
struct vn_image_view;
struct vn_sampler;
struct vn_sampler_ycbcr_conversion;
struct vn_descriptor_set_layout;
struct vn_descriptor_pool;
struct vn_descriptor_set;
struct vn_descriptor_update_template;
struct vn_render_pass;
struct vn_framebuffer;
struct vn_event;
struct vn_query_pool;
struct vn_shader_module;
struct vn_pipeline_layout;
struct vn_pipeline_cache;
struct vn_pipeline;
struct vn_command_pool;
struct vn_command_buffer;

struct vn_cs_encoder;
struct vn_cs_decoder;
struct vn_ring;

struct vn_renderer;
struct vn_renderer_shmem;
struct vn_renderer_bo;
struct vn_renderer_sync;

enum vn_debug {
   VN_DEBUG_INIT = 1ull << 0,
   VN_DEBUG_RESULT = 1ull << 1,
   VN_DEBUG_VTEST = 1ull << 2,
   VN_DEBUG_WSI = 1ull << 3,
   VN_DEBUG_NO_ABORT = 1ull << 4,
   VN_DEBUG_LOG_CTX_INFO = 1ull << 5,
   VN_DEBUG_CACHE = 1ull << 6,
   VN_DEBUG_NO_SPARSE = 1ull << 7,
   VN_DEBUG_NO_GPL = 1ull << 8,
};

enum vn_perf {
   VN_PERF_NO_ASYNC_SET_ALLOC = 1ull << 0,
   VN_PERF_NO_ASYNC_BUFFER_CREATE = 1ull << 1,
   VN_PERF_NO_ASYNC_QUEUE_SUBMIT = 1ull << 2,
   VN_PERF_NO_EVENT_FEEDBACK = 1ull << 3,
   VN_PERF_NO_FENCE_FEEDBACK = 1ull << 4,
   VN_PERF_NO_CMD_BATCHING = 1ull << 6,
   VN_PERF_NO_SEMAPHORE_FEEDBACK = 1ull << 7,
   VN_PERF_NO_QUERY_FEEDBACK = 1ull << 8,
   VN_PERF_NO_ASYNC_MEM_ALLOC = 1ull << 9,
   VN_PERF_NO_TILED_WSI_IMAGE = 1ull << 10,
   VN_PERF_NO_MULTI_RING = 1ull << 11,
   VN_PERF_NO_ASYNC_IMAGE_CREATE = 1ull << 12,
   VN_PERF_NO_ASYNC_IMAGE_FORMAT = 1ull << 13,
};

typedef uint64_t vn_object_id;

/* base class of vn_instance */
struct vn_instance_base {
   struct vk_instance base;
   vn_object_id id;
};

/* base class of vn_physical_device */
struct vn_physical_device_base {
   struct vk_physical_device base;
   vn_object_id id;
};

/* base class of vn_device */
struct vn_device_base {
   struct vk_device base;
   vn_object_id id;
};

/* base class of vn_queue */
struct vn_queue_base {
   struct vk_queue base;
   vn_object_id id;
};

/* base class of vn_device_memory */
struct vn_device_memory_base {
   struct vk_device_memory base;
   vn_object_id id;
};

/* base class of vn_image */
struct vn_image_base {
   struct vk_image base;
   vn_object_id id;
};

/* base class of other driver objects */
struct vn_object_base {
   struct vk_object_base base;
   vn_object_id id;
};

struct vn_refcount {
   atomic_int count;
};

struct vn_env {
   uint64_t debug;
   uint64_t perf;
};
extern struct vn_env vn_env;

/* Only one "waiting" thread may fulfill the "watchdog" role at a time. Every
 * VN_WATCHDOG_REPORT_PERIOD_US or longer, the watchdog tests the ring's ALIVE
 * status, updates the "alive" atomic, and resets the ALIVE status for the
 * next cycle. Other waiting threads just check the "alive" atomic. The
 * watchdog role may be released dynamically and acquired by another waiting
 * thread.
 *
 * Examples of "waiting" are to wait for:
 * - the ring to reach a seqno
 * - ring space to be released
 * - sync primitives to signal
 * - query results to become available
 */
struct vn_watchdog {
   mtx_t mutex;
   atomic_int tid;
   atomic_bool alive;
};
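
/* Illustrative sketch of the protocol described above (assumptions, not the
 * actual driver code): a waiter could try to take over the watchdog role
 * with mtx_trylock() and record its tid, while every other waiter only
 * observes the "alive" atomic:
 *
 *    if (mtx_trylock(&watchdog->mutex) == thrd_success) {
 *       watchdog->tid = vn_gettid();
 *       // periodically test the ring's ALIVE status and update
 *       // watchdog->alive, every VN_WATCHDOG_REPORT_PERIOD_US or longer
 *       watchdog->tid = 0;
 *       mtx_unlock(&watchdog->mutex);
 *    } else if (!atomic_load(&watchdog->alive)) {
 *       // the current watchdog has reported the ring as unresponsive
 *    }
 */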

enum vn_relax_reason {
   VN_RELAX_REASON_RING_SEQNO,
   VN_RELAX_REASON_TLS_RING_SEQNO,
   VN_RELAX_REASON_RING_SPACE,
   VN_RELAX_REASON_FENCE,
   VN_RELAX_REASON_SEMAPHORE,
   VN_RELAX_REASON_QUERY,
};

/* vn_relax_profile defines the driver-side polling behavior
 *
 * - base_sleep_us:
 *   - the minimum polling interval after the initial busy waits
 *
 * - busy_wait_order:
 *   - initially 2 ^ busy_wait_order calls to thrd_yield()
 *
 * - warn_order:
 *   - number of polls at order N:
 *     - fn_cnt(N) = 2 ^ N
 *   - interval of a poll at order N:
 *     - fn_step(N) = base_sleep_us * (2 ^ (N - busy_wait_order))
 *   - warn occasionally once we have slept at least:
 *     - for (i = busy_wait_order; i < warn_order; i++)
 *          total_sleep += fn_cnt(i) * fn_step(i)
 *
 * - abort_order:
 *   - similar to warn_order, but abort() instead
 */
struct vn_relax_profile {
   uint32_t base_sleep_us;
   uint32_t busy_wait_order;
   uint32_t warn_order;
   uint32_t abort_order;
};
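
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * total sleep before the first warning follows directly from the fn_cnt and
 * fn_step formulas above:
 *
 *    static inline uint64_t
 *    vn_relax_profile_first_warn_us(const struct vn_relax_profile *p)
 *    {
 *       uint64_t total_us = 0;
 *       for (uint32_t i = p->busy_wait_order; i < p->warn_order; i++) {
 *          const uint64_t cnt = 1ull << i;                // fn_cnt(i)
 *          const uint64_t step_us =                       // fn_step(i)
 *             p->base_sleep_us * (1ull << (i - p->busy_wait_order));
 *          total_us += cnt * step_us;
 *       }
 *       return total_us;
 *    }
 */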

struct vn_relax_state {
   struct vn_instance *instance;
   uint32_t iter;
   const struct vn_relax_profile profile;
   const char *reason_str;
};

/* TLS ring
 * - co-owned by TLS and VkInstance
 * - initialized in TLS upon request
 * - teardown happens upon thread exit or instance destroy
 * - teardown is split into 2 stages:
 *   1. one owner locks, destroys the ring and marks it destroyed
 *   2. the other owner locks and frees the tls ring storage
 */
struct vn_tls_ring {
   mtx_t mutex;
   struct vn_ring *ring;
   struct vn_instance *instance;
   struct list_head tls_head;
   struct list_head vk_head;
};

struct vn_tls {
   /* Track the threads on which swapchain and command pool creations occur.
    * Pipeline creations on those threads are forced async via the primary
    * ring.
    */
   bool async_pipeline_create;
   /* Track TLS rings owned across instances. */
   struct list_head tls_rings;
};

/* Cached storage for an object's internal use, with the following
 * constraints:
 * - It belongs to the object and shares its lifetime.
 * - Storage reuse is protected by external synchronization.
 * - The returned storage is not zero-initialized.
 * - It never shrinks unless purged via fini.
 *
 * The current users are:
 * - VkCommandPool
 * - VkQueue
 */
struct vn_cached_storage {
   const VkAllocationCallbacks *alloc;
   size_t size;
   void *data;
};

void
vn_env_init(void);

void
vn_trace_init(void);

void
vn_log(struct vn_instance *instance, const char *format, ...)
   PRINTFLIKE(2, 3);

VkResult
vn_log_result(struct vn_instance *instance,
              VkResult result,
              const char *where);

#define VN_REFCOUNT_INIT(val)                                                \
   (struct vn_refcount)                                                      \
   {                                                                         \
      .count = (val),                                                        \
   }

static inline int
vn_refcount_load_relaxed(const struct vn_refcount *ref)
{
   return atomic_load_explicit(&ref->count, memory_order_relaxed);
}

static inline int
vn_refcount_fetch_add_relaxed(struct vn_refcount *ref, int val)
{
   return atomic_fetch_add_explicit(&ref->count, val, memory_order_relaxed);
}

static inline int
vn_refcount_fetch_sub_release(struct vn_refcount *ref, int val)
{
   return atomic_fetch_sub_explicit(&ref->count, val, memory_order_release);
}

static inline bool
vn_refcount_is_valid(const struct vn_refcount *ref)
{
   return vn_refcount_load_relaxed(ref) > 0;
}

static inline void
vn_refcount_inc(struct vn_refcount *ref)
{
   /* no ordering imposed */
   ASSERTED const int old = vn_refcount_fetch_add_relaxed(ref, 1);
   assert(old >= 1);
}

static inline bool
vn_refcount_dec(struct vn_refcount *ref)
{
   /* prior reads/writes cannot be reordered after this */
   const int old = vn_refcount_fetch_sub_release(ref, 1);
   assert(old >= 1);

   /* subsequent free cannot be reordered before this */
   if (old == 1)
      atomic_thread_fence(memory_order_acquire);

   return old == 1;
}
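
/* Illustrative sketch (hypothetical object, not part of the driver): a
 * refcounted object starts at VN_REFCOUNT_INIT(1), is retained with
 * vn_refcount_inc(), and is freed by whichever caller observes the last
 * release from vn_refcount_dec():
 *
 *    struct demo_obj {
 *       struct vn_refcount refcount;
 *       // payload
 *    };
 *
 *    void demo_obj_unref(struct demo_obj *obj)
 *    {
 *       if (vn_refcount_dec(&obj->refcount))
 *          free(obj);
 *    }
 */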

extern uint64_t vn_next_obj_id;

static inline uint64_t
vn_get_next_obj_id(void)
{
   return p_atomic_fetch_add(&vn_next_obj_id, 1);
}

uint32_t
vn_extension_get_spec_version(const char *name);

static inline void
vn_watchdog_init(struct vn_watchdog *watchdog)
{
#ifndef NDEBUG
   /* ensure the minimum check period is greater than the maximum renderer
    * reporting period (with a margin of safety to ensure no false
    * positives).
    *
    * first_warn_time is pre-calculated based on the parameters in vn_relax
    * and must be updated together with them.
    */
   static const uint32_t first_warn_time = 3481600;
   static const uint32_t safety_margin = 250000;
   assert(first_warn_time - safety_margin >= VN_WATCHDOG_REPORT_PERIOD_US);
#endif

   mtx_init(&watchdog->mutex, mtx_plain);

   watchdog->tid = 0;

   /* initialized to be alive to avoid a false vn_watchdog_timeout alarm */
   watchdog->alive = true;
}

static inline void
vn_watchdog_fini(struct vn_watchdog *watchdog)
{
   mtx_destroy(&watchdog->mutex);
}

struct vn_relax_state
vn_relax_init(struct vn_instance *instance, enum vn_relax_reason reason);

void
vn_relax(struct vn_relax_state *state);

void
vn_relax_fini(struct vn_relax_state *state);
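
/* Illustrative sketch (assumed typical usage; condition_is_met() stands in
 * for whatever the caller waits on): polling under a relax state lets the
 * busy waiting, sleeping, warning and aborting behavior follow the profile
 * chosen for the reason:
 *
 *    struct vn_relax_state state =
 *       vn_relax_init(instance, VN_RELAX_REASON_FENCE);
 *    while (!condition_is_met())
 *       vn_relax(&state);
 *    vn_relax_fini(&state);
 */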

static_assert(sizeof(vn_object_id) >= sizeof(uintptr_t), "");

static inline VkResult
vn_instance_base_init(
   struct vn_instance_base *instance,
   const struct vk_instance_extension_table *supported_extensions,
   const struct vk_instance_dispatch_table *dispatch_table,
   const VkInstanceCreateInfo *info,
   const VkAllocationCallbacks *alloc)
{
   VkResult result = vk_instance_init(&instance->base, supported_extensions,
                                      dispatch_table, info, alloc);
   instance->id = vn_get_next_obj_id();
   return result;
}

static inline void
vn_instance_base_fini(struct vn_instance_base *instance)
{
   vk_instance_finish(&instance->base);
}

static inline VkResult
vn_physical_device_base_init(
   struct vn_physical_device_base *physical_dev,
   struct vn_instance_base *instance,
   const struct vk_device_extension_table *supported_extensions,
   const struct vk_physical_device_dispatch_table *dispatch_table)
{
   VkResult result = vk_physical_device_init(
      &physical_dev->base, &instance->base, supported_extensions, NULL, NULL,
      dispatch_table);
   physical_dev->id = vn_get_next_obj_id();
   return result;
}

static inline void
vn_physical_device_base_fini(struct vn_physical_device_base *physical_dev)
{
   vk_physical_device_finish(&physical_dev->base);
}

static inline VkResult
vn_device_base_init(struct vn_device_base *dev,
                    struct vn_physical_device_base *physical_dev,
                    const struct vk_device_dispatch_table *dispatch_table,
                    const VkDeviceCreateInfo *info,
                    const VkAllocationCallbacks *alloc)
{
   VkResult result = vk_device_init(&dev->base, &physical_dev->base,
                                    dispatch_table, info, alloc);
   dev->id = vn_get_next_obj_id();
   return result;
}

static inline void
vn_device_base_fini(struct vn_device_base *dev)
{
   vk_device_finish(&dev->base);
}

static inline VkResult
vn_queue_base_init(struct vn_queue_base *queue,
                   struct vn_device_base *dev,
                   const VkDeviceQueueCreateInfo *queue_info,
                   uint32_t queue_index)
{
   VkResult result =
      vk_queue_init(&queue->base, &dev->base, queue_info, queue_index);
   queue->id = vn_get_next_obj_id();
   return result;
}

static inline void
vn_queue_base_fini(struct vn_queue_base *queue)
{
   vk_queue_finish(&queue->base);
}

static inline void
vn_object_base_init(struct vn_object_base *obj,
                    VkObjectType type,
                    struct vn_device_base *dev)
{
   vk_object_base_init(&dev->base, &obj->base, type);
   obj->id = vn_get_next_obj_id();
}

static inline void
vn_object_base_fini(struct vn_object_base *obj)
{
   vk_object_base_finish(&obj->base);
}

static inline void
vn_object_set_id(void *obj, vn_object_id id, VkObjectType type)
{
   assert(((const struct vk_object_base *)obj)->type == type);
   switch (type) {
   case VK_OBJECT_TYPE_INSTANCE:
      ((struct vn_instance_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
      ((struct vn_physical_device_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_DEVICE:
      ((struct vn_device_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_QUEUE:
      ((struct vn_queue_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_DEVICE_MEMORY:
      ((struct vn_device_memory_base *)obj)->id = id;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      ((struct vn_image_base *)obj)->id = id;
      break;
   default:
      ((struct vn_object_base *)obj)->id = id;
      break;
   }
}

static inline vn_object_id
vn_object_get_id(const void *obj, VkObjectType type)
{
   assert(((const struct vk_object_base *)obj)->type == type);
   switch (type) {
   case VK_OBJECT_TYPE_INSTANCE:
      return ((struct vn_instance_base *)obj)->id;
   case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
      return ((struct vn_physical_device_base *)obj)->id;
   case VK_OBJECT_TYPE_DEVICE:
      return ((struct vn_device_base *)obj)->id;
   case VK_OBJECT_TYPE_QUEUE:
      return ((struct vn_queue_base *)obj)->id;
   case VK_OBJECT_TYPE_DEVICE_MEMORY:
      return ((struct vn_device_memory_base *)obj)->id;
   case VK_OBJECT_TYPE_IMAGE:
      return ((struct vn_image_base *)obj)->id;
   default:
      return ((struct vn_object_base *)obj)->id;
   }
}

static inline pid_t
vn_gettid(void)
{
#if DETECT_OS_ANDROID
   return gettid();
#else
   return syscall(SYS_gettid);
#endif
}

struct vn_tls *
vn_tls_get(void);

static inline void
vn_tls_set_async_pipeline_create(void)
{
   struct vn_tls *tls = vn_tls_get();
   if (likely(tls))
      tls->async_pipeline_create = true;
}

static inline bool
vn_tls_get_async_pipeline_create(void)
{
   const struct vn_tls *tls = vn_tls_get();
   if (likely(tls))
      return tls->async_pipeline_create;
   return true;
}

struct vn_ring *
vn_tls_get_ring(struct vn_instance *instance);

void
vn_tls_destroy_ring(struct vn_tls_ring *tls_ring);

static inline uint32_t
vn_cache_key_hash_function(const void *key)
{
   return _mesa_hash_data(key, SHA1_DIGEST_LENGTH);
}

static inline bool
vn_cache_key_equal_function(const void *key1, const void *key2)
{
   return memcmp(key1, key2, SHA1_DIGEST_LENGTH) == 0;
}
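
/* Illustrative sketch (assumption, not part of this header): the two
 * callbacks match the key_hash/key_equals signatures of util/hash_table.h,
 * so a cache keyed by SHA1 digests could be built on top of them:
 *
 *    struct hash_table *cache = _mesa_hash_table_create(
 *       NULL, vn_cache_key_hash_function, vn_cache_key_equal_function);
 */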

static inline void
vn_cached_storage_init(struct vn_cached_storage *storage,
                       const VkAllocationCallbacks *alloc)
{
   storage->alloc = alloc;
   storage->size = 0;
   storage->data = NULL;
}

static inline void *
vn_cached_storage_get(struct vn_cached_storage *storage, size_t size)
{
   if (size > storage->size) {
      void *data =
         vk_realloc(storage->alloc, storage->data, size, VN_DEFAULT_ALIGN,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!data)
         return NULL;

      storage->size = size;
      storage->data = data;
   }
   return storage->data;
}

static inline void
vn_cached_storage_fini(struct vn_cached_storage *storage)
{
   vk_free(storage->alloc, storage->data);
}
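
/* Illustrative sketch (hypothetical caller, not part of the driver): the
 * storage lives with its owning object and grows on demand; the returned
 * pointer is scratch memory that is only valid until the next get or fini:
 *
 *    vn_cached_storage_init(&storage, alloc);
 *    ...
 *    uint32_t *tmp = vn_cached_storage_get(&storage, count * sizeof(*tmp));
 *    if (!tmp)
 *       return VK_ERROR_OUT_OF_HOST_MEMORY;
 *    ...
 *    vn_cached_storage_fini(&storage);
 */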

#endif /* VN_COMMON_H */