/*
 * Copyright 2021 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/** VK_EXT_headless_surface */

#include "util/macros.h"
#include "util/hash_table.h"
#include "util/timespec.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "vk_instance.h"
#include "vk_physical_device.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#include "drm-uapi/drm_fourcc.h"

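/* Per-physical-device state for the headless WSI backend. */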
struct wsi_headless {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};

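/* Every queue family can present to a headless surface. */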
static VkResult
wsi_headless_surface_get_support(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t queueFamilyIndex,
                                 VkBool32 *pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

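/* Report surface capabilities: no fixed extent, identity transform only,
 * and at least four images so MAILBOX can run without stalling.
 */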
static VkResult
wsi_headless_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                      struct wsi_device *wsi_device,
                                      VkSurfaceCapabilitiesKHR *caps)
{
   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the presentation engine
    *  4) One to render to
    */
   caps->minImageCount = 4;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) {
      wsi_device->maxImageDimension2D,
      wsi_device->maxImageDimension2D,
   };

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags = wsi_caps_get_image_usage();

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

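/* VkSurfaceCapabilities2KHR wrapper: fills the base capabilities and walks
 * the pNext chain (protected surfaces are not supported).
 */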
static VkResult
wsi_headless_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                       struct wsi_device *wsi_device,
                                       const void *info_next,
                                       VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      wsi_headless_surface_get_capabilities(surface, wsi_device,
                                            &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

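/* Advertise the two 8-bit UNORM swapchain formats.  The order honours the
 * force_bgra8_unorm_first workaround; the SurfaceFormat2 variant below
 * mirrors this list.
 */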
static VkResult
wsi_headless_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t *pSurfaceFormatCount,
                                 VkSurfaceFormatKHR *pSurfaceFormats)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out, pSurfaceFormats, pSurfaceFormatCount);

   if (wsi->wsi->force_bgra8_unorm_first) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   } else {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
wsi_headless_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                                  struct wsi_device *wsi_device,
                                  const void *info_next,
                                  uint32_t *pSurfaceFormatCount,
                                  VkSurfaceFormat2KHR *pSurfaceFormats)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out, pSurfaceFormats, pSurfaceFormatCount);

   if (wsi->wsi->force_bgra8_unorm_first) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   } else {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

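/* Report the modes from the static present_modes[] table, clamping to the
 * caller-provided array size as the Vulkan two-call idiom requires.
 */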
static VkResult
wsi_headless_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                       struct wsi_device *wsi_device,
                                       uint32_t *pPresentModeCount,
                                       VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < ARRAY_SIZE(present_modes))
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

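/* A headless surface has no fixed size, so report the maximal rectangle. */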
static VkResult
wsi_headless_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
                                            struct wsi_device *wsi_device,
                                            uint32_t *pRectCount,
                                            VkRect2D *pRects)
{
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      /* We don't know a size so just return the usual "I don't know." */
      *rect = (VkRect2D) {
         .offset = { 0, 0 },
         .extent = { UINT32_MAX, UINT32_MAX },
      };
   }

   return vk_outarray_status(&out);
}

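/* Swapchain state: each image carries a busy flag that is set on acquire
 * and cleared again on present, since there is no display to hand it to.
 */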
struct wsi_headless_image {
   struct wsi_image base;
   bool busy;
};

struct wsi_headless_swapchain {
   struct wsi_swapchain base;

   VkExtent2D extent;
   VkFormat vk_format;

   struct u_vector modifiers;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct wsi_headless_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_headless_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

static struct wsi_image *
wsi_headless_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
                                     uint32_t image_index)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

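/* Spin until an image is free or the caller's timeout expires.  There is no
 * compositor to wait on, so images only become free again in queue_present().
 */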
static VkResult
wsi_headless_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                          const VkAcquireNextImageInfoKHR *info,
                                          uint32_t *image_index)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;
   struct timespec start_time, end_time;
   struct timespec rel_timeout;

   timespec_from_nsec(&rel_timeout, info->timeout);

   clock_gettime(CLOCK_MONOTONIC, &start_time);
   timespec_add(&end_time, &rel_timeout, &start_time);

   while (1) {
      /* Try to find a free image. */
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* Check for timeout. */
      struct timespec current_time;
      clock_gettime(CLOCK_MONOTONIC, &current_time);
      if (timespec_after(&current_time, &end_time))
         return VK_NOT_READY;
   }
}

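/* Nothing actually gets displayed; presenting just hands the image back to
 * the free pool so a later acquire can reuse it.
 */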
static VkResult
wsi_headless_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                                     uint32_t image_index,
                                     uint64_t present_id,
                                     const VkPresentRegionKHR *damage)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;

   assert(image_index < chain->base.image_count);

   chain->images[image_index].busy = false;

   return VK_SUCCESS;
}

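/* Destroy every image, then the common swapchain state, then the chain. */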
static VkResult
wsi_headless_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                               const VkAllocationCallbacks *pAllocator)
{
   struct wsi_headless_swapchain *chain =
      (struct wsi_headless_swapchain *)wsi_chain;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      if (chain->images[i].base.image != VK_NULL_HANDLE)
         wsi_destroy_image(&chain->base, &chain->images[i].base);
   }

   u_vector_finish(&chain->modifiers);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static const struct VkDrmFormatModifierPropertiesEXT *
get_modifier_props(const struct wsi_image_info *info, uint64_t modifier)
{
   for (uint32_t i = 0; i < info->modifier_prop_count; i++) {
      if (info->modifier_props[i].drmFormatModifier == modifier)
         return &info->modifier_props[i];
   }
   return NULL;
}

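/* Allocate plain device memory for a swapchain image.  The image never
 * leaves the GPU, so no dma-buf export is needed; we only record the
 * modifier and per-plane layout that the common WSI code expects.
 */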
static VkResult
wsi_create_null_image_mem(const struct wsi_swapchain *chain,
                          const struct wsi_image_info *info,
                          struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_device_memory_type(wsi, reqs.memoryTypeBits),
   };
   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   image->dma_buf_fd = -1;

   if (info->drm_mod_list.drmFormatModifierCount > 0) {
      VkImageDrmFormatModifierPropertiesEXT image_mod_props = {
         .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
      };
      result = wsi->GetImageDrmFormatModifierPropertiesEXT(chain->device,
                                                           image->image,
                                                           &image_mod_props);
      if (result != VK_SUCCESS)
         return result;

      image->drm_modifier = image_mod_props.drmFormatModifier;
      assert(image->drm_modifier != DRM_FORMAT_MOD_INVALID);

      const struct VkDrmFormatModifierPropertiesEXT *mod_props =
         get_modifier_props(info, image->drm_modifier);
      image->num_planes = mod_props->drmFormatModifierPlaneCount;

      for (uint32_t p = 0; p < image->num_planes; p++) {
         const VkImageSubresource image_subresource = {
            .aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT << p,
            .mipLevel = 0,
            .arrayLayer = 0,
         };
         VkSubresourceLayout image_layout;
         wsi->GetImageSubresourceLayout(chain->device, image->image,
                                        &image_subresource, &image_layout);
         image->sizes[p] = image_layout.size;
         image->row_pitches[p] = image_layout.rowPitch;
         image->offsets[p] = image_layout.offset;
      }
   } else {
      const VkImageSubresource image_subresource = {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .mipLevel = 0,
         .arrayLayer = 0,
      };
      VkSubresourceLayout image_layout;
      wsi->GetImageSubresourceLayout(chain->device, image->image,
                                     &image_subresource, &image_layout);

      image->drm_modifier = DRM_FORMAT_MOD_INVALID;
      image->num_planes = 1;
      image->sizes[0] = reqs.size;
      image->row_pitches[0] = image_layout.rowPitch;
      image->offsets[0] = 0;
   }

   return VK_SUCCESS;
}

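/* Create the swapchain: allocate the chain plus its image array in one
 * block, initialize the common swapchain state, then create every image
 * with the null-memory allocator above.
 */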
static VkResult
wsi_headless_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                      VkDevice device,
                                      struct wsi_device *wsi_device,
                                      const VkSwapchainCreateInfoKHR *pCreateInfo,
                                      const VkAllocationCallbacks *pAllocator,
                                      struct wsi_swapchain **swapchain_out)
{
   struct wsi_headless_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   struct wsi_drm_image_params drm_params = {
      .base.image_type = WSI_IMAGE_TYPE_DRM,
      .same_gpu = true,
   };

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, &drm_params.base, pAllocator);
   if (result != VK_SUCCESS) {
      vk_free(pAllocator, chain);
      return result;
   }

   chain->base.destroy = wsi_headless_swapchain_destroy;
   chain->base.get_wsi_image = wsi_headless_swapchain_get_wsi_image;
   chain->base.acquire_next_image = wsi_headless_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_headless_swapchain_queue_present;
   chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
   chain->base.image_count = num_images;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;

   result = wsi_configure_image(&chain->base, pCreateInfo,
                                0, &chain->base.image_info);
   if (result != VK_SUCCESS) {
      goto fail;
   }
   chain->base.image_info.create_mem = wsi_create_null_image_mem;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_create_image(&chain->base, &chain->base.image_info,
                                &chain->images[i].base);
      if (result != VK_SUCCESS)
         goto fail;

      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_headless_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

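/* Register the headless backend's surface callbacks with the WSI device. */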
VkResult
wsi_headless_init_wsi(struct wsi_device *wsi_device,
                      const VkAllocationCallbacks *alloc,
                      VkPhysicalDevice physical_device)
{
   struct wsi_headless *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->wsi = wsi_device;

   wsi->base.get_support = wsi_headless_surface_get_support;
   wsi->base.get_capabilities2 = wsi_headless_surface_get_capabilities2;
   wsi->base.get_formats = wsi_headless_surface_get_formats;
   wsi->base.get_formats2 = wsi_headless_surface_get_formats2;
   wsi->base.get_present_modes = wsi_headless_surface_get_present_modes;
   wsi->base.get_present_rectangles = wsi_headless_surface_get_present_rectangles;
   wsi->base.create_swapchain = wsi_headless_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = &wsi->base;

   return VK_SUCCESS;

fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] = NULL;

   return result;
}

void
wsi_headless_finish_wsi(struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc)
{
   struct wsi_headless *wsi =
      (struct wsi_headless *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS];
   if (!wsi)
      return;

   vk_free(alloc, wsi);
}

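/* Entrypoint for vkCreateHeadlessSurfaceEXT.  The surface carries no state
 * beyond its platform tag, so only the small VkIcdSurfaceHeadless wrapper is
 * allocated.
 *
 * Illustrative application-side usage (the `instance` and `surface` names
 * below are the application's own, not part of this file):
 *
 *    VkHeadlessSurfaceCreateInfoEXT info = {
 *       .sType = VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT,
 *    };
 *    VkSurfaceKHR surface;
 *    VkResult res = vkCreateHeadlessSurfaceEXT(instance, &info, NULL, &surface);
 */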
VkResult wsi_CreateHeadlessSurfaceEXT(
   VkInstance                                  _instance,
   const VkHeadlessSurfaceCreateInfoEXT*       pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSurfaceKHR*                               pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceHeadless *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_HEADLESS;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}