/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mike Blumenkrantz <[email protected]>
 */

#ifndef ZINK_BO_H
#define ZINK_BO_H
#include "zink_types.h"
#include "zink_batch.h"

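/* shorthand for commonly-tested memory-property combinations */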
#define VK_VIS_VRAM (VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
#define VK_STAGING_RAM (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
#define VK_LAZY_VRAM (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)

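/* only the sparse heap carries an allocation flag; all other heaps allocate plainly */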
static ALWAYS_INLINE enum zink_alloc_flag
zink_alloc_flags_from_heap(enum zink_heap heap)
{
   switch (heap) {
   case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
      return ZINK_ALLOC_SPARSE;
   default:
      break;
   }
   return (enum zink_alloc_flag)0;
}

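/* map a zink heap to the VkMemoryPropertyFlags used to select a memory type for it */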
static ALWAYS_INLINE VkMemoryPropertyFlags
vk_domain_from_heap(enum zink_heap heap)
{
   VkMemoryPropertyFlags domains = (VkMemoryPropertyFlags)0;

   switch (heap) {
   case ZINK_HEAP_DEVICE_LOCAL:
   case ZINK_HEAP_DEVICE_LOCAL_SPARSE:
      domains = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
      break;
   case ZINK_HEAP_DEVICE_LOCAL_LAZY:
      domains = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
      break;
   case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
      domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
      break;
   case ZINK_HEAP_HOST_VISIBLE_COHERENT:
      domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
      break;
   case ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED:
      domains = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
      break;
   default:
      break;
   }
   return domains;
}

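/* inverse of vk_domain_from_heap(): pick the zink heap for a set of memory
 * properties, preferring sparse, then mappable VRAM, then plain VRAM, then
 * cached host memory, and falling back to coherent host memory
 */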
static ALWAYS_INLINE enum zink_heap
zink_heap_from_domain_flags(VkMemoryPropertyFlags domains, enum zink_alloc_flag flags)
{
   if (flags & ZINK_ALLOC_SPARSE)
      return ZINK_HEAP_DEVICE_LOCAL_SPARSE;

   if ((domains & VK_VIS_VRAM) == VK_VIS_VRAM)
      return ZINK_HEAP_DEVICE_LOCAL_VISIBLE;

   if (domains & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
      return ZINK_HEAP_DEVICE_LOCAL;

   if (domains & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
      return ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED;

   return ZINK_HEAP_HOST_VISIBLE_COHERENT;
}

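/* return the first memory-type index for 'heap' that is also set in the 'types'
 * mask (e.g. a VkMemoryRequirements::memoryTypeBits value), or UINT32_MAX if
 * none match
 */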
static ALWAYS_INLINE unsigned
zink_mem_type_idx_from_types(struct zink_screen *screen, enum zink_heap heap, uint32_t types)
{
   for (unsigned i = 0; i < screen->heap_count[heap]; i++) {
      if (types & BITFIELD_BIT(screen->heap_map[heap][i])) {
         return screen->heap_map[heap][i];
      }
   }
   return UINT32_MAX;
}

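/* one-time setup/teardown of the screen's BO allocation machinery */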
bool
zink_bo_init(struct zink_screen *screen);

void
zink_bo_deinit(struct zink_screen *screen);

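/* allocate a buffer of 'size' bytes from 'heap'; 'mem_type_idx' selects the
 * Vulkan memory type, and 'pNext' (if non-NULL) is chained into the
 * memory-allocation info
 */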
struct pb_buffer *
zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, unsigned mem_type_idx, const void *pNext);

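/* export a KMS handle for 'bo' on the DRM device 'fd'; returns success */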
bool
zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle);

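/* slab-suballocated BOs have no VkDeviceMemory of their own: the getters below
 * chase u.slab.real to reach the parent allocation's memory and size
 */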
static ALWAYS_INLINE uint64_t
zink_bo_get_offset(const struct zink_bo *bo)
{
   return bo->offset;
}

static ALWAYS_INLINE VkDeviceMemory
zink_bo_get_mem(const struct zink_bo *bo)
{
   return bo->mem ? bo->mem : bo->u.slab.real->mem;
}

static ALWAYS_INLINE VkDeviceSize
zink_bo_get_size(const struct zink_bo *bo)
{
   return bo->mem ? bo->base.base.size : bo->u.slab.real->base.base.size;
}

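/* CPU-map 'bo' for direct access (returns NULL on failure); calls are expected
 * to be balanced with zink_bo_unmap()
 */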
void *
zink_bo_map(struct zink_screen *screen, struct zink_bo *bo);
void
zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);

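/* (de)commit backing memory for the given region of a sparse resource:
 * 'commit' selects bind vs unbind, and 'sem' returns a semaphore to wait on
 * before the region can be used
 */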
bool
zink_bo_commit(struct zink_context *ctx, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem);

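/*
 * BO usage tracking: a BO remembers its most recent read and write batch
 * usage separately.  Each entry pairs a zink_batch_usage pointer with the
 * submit_count at which it was recorded; since batch states get recycled,
 * the stored count must still match the batch's current submit_count for
 * the usage to be considered live.
 */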
static ALWAYS_INLINE bool
zink_bo_has_unflushed_usage(const struct zink_bo *bo)
{
   return zink_batch_usage_is_unflushed(bo->reads.u) ||
          zink_batch_usage_is_unflushed(bo->writes.u);
}

static ALWAYS_INLINE bool
zink_bo_has_usage(const struct zink_bo *bo)
{
   return zink_bo_has_unflushed_usage(bo) ||
          (zink_batch_usage_exists(bo->reads.u) && bo->reads.submit_count == bo->reads.u->submit_count) ||
          (zink_batch_usage_exists(bo->writes.u) && bo->writes.submit_count == bo->writes.u->submit_count);
}

static ALWAYS_INLINE bool
zink_bo_usage_matches(const struct zink_bo *bo, const struct zink_batch_state *bs)
{
   return (zink_batch_usage_matches(bo->reads.u, bs) && bo->reads.submit_count == bo->reads.u->submit_count) ||
          (zink_batch_usage_matches(bo->writes.u, bs) && bo->writes.submit_count == bo->writes.u->submit_count);
}

static ALWAYS_INLINE bool
zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
{
   if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads.u))
      return false;
   if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes.u))
      return false;
   return true;
}

static ALWAYS_INLINE bool
zink_bo_usage_check_completion_fast(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
{
   if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion_fast(screen, bo->reads.u))
      return false;
   if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion_fast(screen, bo->writes.u))
      return false;
   return true;
}

static ALWAYS_INLINE void
zink_bo_usage_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
{
   if (access & ZINK_RESOURCE_ACCESS_READ)
      zink_batch_usage_wait(ctx, bo->reads.u);
   if (access & ZINK_RESOURCE_ACCESS_WRITE)
      zink_batch_usage_wait(ctx, bo->writes.u);
}

static ALWAYS_INLINE void
zink_bo_usage_try_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
{
   if (access & ZINK_RESOURCE_ACCESS_READ)
      zink_batch_usage_try_wait(ctx, bo->reads.u);
   if (access & ZINK_RESOURCE_ACCESS_WRITE)
      zink_batch_usage_try_wait(ctx, bo->writes.u);
}

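/* record 'bs' as the current reader or writer of 'bo' */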
static ALWAYS_INLINE void
zink_bo_usage_set(struct zink_bo *bo, struct zink_batch_state *bs, bool write)
{
   if (write) {
      zink_batch_usage_set(&bo->writes.u, bs);
      bo->writes.submit_count = bs->usage.submit_count;
   } else {
      zink_batch_usage_set(&bo->reads.u, bs);
      bo->reads.submit_count = bs->usage.submit_count;
   }
}

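/* drop any usage of 'bo' belonging to 'bs'; returns whether any usage remains */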
static ALWAYS_INLINE bool
zink_bo_usage_unset(struct zink_bo *bo, struct zink_batch_state *bs)
{
   zink_batch_usage_unset(&bo->reads.u, bs);
   zink_batch_usage_unset(&bo->writes.u, bs);
   return bo->reads.u || bo->writes.u;
}

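/* drop a reference; the BO is returned to its slab/cache or destroyed when the
 * last reference goes away
 */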
static ALWAYS_INLINE void
zink_bo_unref(struct zink_screen *screen, struct zink_bo *bo)
{
   struct pb_buffer *pbuf = &bo->base;
   pb_reference_with_winsys(screen, &pbuf, NULL);
}
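
/*
 * Rough lifecycle sketch (illustrative only, not lifted from a real caller;
 * it assumes zink_bo embeds its pb_buffer as 'base', as zink_bo_unref()
 * above suggests, and that 'types' comes from the buffer's actual memory
 * requirements -- get_memory_type_bits() below is a hypothetical helper):
 *
 *    uint32_t types = get_memory_type_bits(screen, size);
 *    enum zink_heap heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
 *    unsigned idx = zink_mem_type_idx_from_types(screen, heap, types);
 *    struct pb_buffer *pbuf = zink_bo_create(screen, size, alignment, heap,
 *                                            (enum zink_alloc_flag)0, idx, NULL);
 *    struct zink_bo *bo = (struct zink_bo *)pbuf;
 *    void *ptr = zink_bo_map(screen, bo);
 *    memcpy(ptr, data, size);
 *    zink_bo_unmap(screen, bo);
 *    zink_bo_unref(screen, bo);
 */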

#endif