1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/VulkanInterface.h"
9 #include "tools/gpu/vk/VkTestMemoryAllocator.h"
10 #include "tools/gpu/vk/VkTestUtils.h"
11
12 #ifdef SK_VULKAN
13
// Choose a per-platform default Vulkan loader library unless the build already
// provided SK_GPU_TOOLS_VK_LIBRARY_NAME. On Linux we additionally record a
// ".so.1" fallback name (see LoadVkLibraryAndGetProcAddrFuncs), mirroring the
// two locations vulkaninfo probes.
#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
#if defined _WIN32
#define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
#elif defined SK_BUILD_FOR_MAC
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
#else
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvulkan.so
#define SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP libvulkan.so.1
#endif
#endif

// Standard two-step stringification so the (unquoted) library-name macros
// above can be passed to SkLoadDynamicLibrary as string literals.
#define STRINGIFY2(S) #S
#define STRINGIFY(S) STRINGIFY2(S)
27
#include <algorithm>
#include <vector>
29
30 #if defined(__GLIBC__)
31 #include <execinfo.h>
32 #endif
33 #include "include/gpu/vk/VulkanBackendContext.h"
34 #include "include/gpu/vk/VulkanExtensions.h"
35 #include "src/base/SkAutoMalloc.h"
36 #include "tools/library/LoadDynamicLibrary.h"
37
38 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
39 #include <sanitizer/lsan_interface.h>
40 #endif
41
42 using namespace skia_private;
43
44 namespace sk_gpu_test {
45
LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr * instProc)46 bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) {
47 static void* vkLib = nullptr;
48 static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
49 if (!vkLib) {
50 vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME));
51 if (!vkLib) {
52 // vulkaninfo tries to load the library from two places, so we do as well
53 // https://github.com/KhronosGroup/Vulkan-Tools/blob/078d44e4664b7efa0b6c96ebced1995c4425d57a/vulkaninfo/vulkaninfo.h#L249
54 #ifdef SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP
55 vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP));
56 if (!vkLib) {
57 return false;
58 }
59 #else
60 return false;
61 #endif
62 }
63 localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
64 "vkGetInstanceProcAddr");
65 }
66 if (!localInstProc) {
67 return false;
68 }
69 *instProc = localInstProc;
70 return true;
71 }
72
73 ////////////////////////////////////////////////////////////////////////////////
74 // Helper code to set up Vulkan context objects
75
76 #ifdef SK_ENABLE_VK_LAYERS
// Validation layers we try to enable when SK_ENABLE_VK_LAYERS is defined.
// Only layers actually present on the system (and new enough, see
// should_include_debug_layer) end up enabled.
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
85
// Zeroes out the patch component of a packed Vulkan version number. Per the
// VK_MAKE_VERSION encoding the patch field occupies the low 12 bits, so only
// major/minor remain for comparisons.
static uint32_t remove_patch_version(uint32_t specVersion) {
    return specVersion & ~uint32_t{0xFFF};
}
89
90 // Returns the index into layers array for the layer we want. Returns -1 if not supported.
should_include_debug_layer(const char * layerName,uint32_t layerCount,VkLayerProperties * layers,uint32_t version)91 static int should_include_debug_layer(const char* layerName,
92 uint32_t layerCount, VkLayerProperties* layers,
93 uint32_t version) {
94 for (uint32_t i = 0; i < layerCount; ++i) {
95 if (!strcmp(layerName, layers[i].layerName)) {
96 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
97 // layer was written against a version that isn't older than the version of Vulkan we're
98 // using so that it has all the api entry points.
99 if (version <= remove_patch_version(layers[i].specVersion)) {
100 return i;
101 }
102 return -1;
103 }
104
105 }
106 return -1;
107 }
108
// Dumps the current call stack to stderr (fd 2) to help locate the origin of a
// validation-layer message. Only implemented for glibc, which provides
// backtrace()/backtrace_symbols_fd(); on other platforms this is a no-op.
static void print_backtrace() {
#if defined(__GLIBC__)
    void* stack[64];
    int count = backtrace(stack, std::size(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}
118
DebugReportCallback(VkDebugReportFlagsEXT flags,VkDebugReportObjectTypeEXT objectType,uint64_t object,size_t location,int32_t messageCode,const char * pLayerPrefix,const char * pMessage,void * pUserData)119 VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
120 VkDebugReportFlagsEXT flags,
121 VkDebugReportObjectTypeEXT objectType,
122 uint64_t object,
123 size_t location,
124 int32_t messageCode,
125 const char* pLayerPrefix,
126 const char* pMessage,
127 void* pUserData) {
128 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
129 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
130 if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
131 strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
132 return VK_FALSE;
133 }
134 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
135 if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
136 strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
137 return VK_FALSE;
138 }
139 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
140 print_backtrace();
141 SkDEBUGFAIL("Vulkan debug layer error");
142 return VK_TRUE; // skip further layers
143 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
144 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
145 print_backtrace();
146 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
147 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
148 print_backtrace();
149 } else {
150 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
151 }
152 return VK_FALSE;
153 }
154 #endif
155
// Resolves vk##name through getInstProc into a local named grVk##name; if the
// entry point is missing, logs and returns false from the ENCLOSING function.
// (Expects a PFN_vkGetInstanceProcAddr named getInstProc in scope.)
#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
165
// Returns true if the named extension is on the allow-list of extensions this test harness enables.
should_include_extension(const char * extensionName)167 static bool should_include_extension(const char* extensionName) {
168 const char* kValidExtensions[] = {
169 // single merged layer
170 VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
171 VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME,
172 VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
173 VK_EXT_DEVICE_FAULT_EXTENSION_NAME,
174 VK_EXT_FRAME_BOUNDARY_EXTENSION_NAME,
175 VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
176 VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
177 VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME,
178 VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
179 VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
180 VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
181 VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
182 VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
183 VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
184 VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
185 VK_KHR_MAINTENANCE1_EXTENSION_NAME,
186 VK_KHR_MAINTENANCE2_EXTENSION_NAME,
187 VK_KHR_MAINTENANCE3_EXTENSION_NAME,
188 VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
189 VK_KHR_SURFACE_EXTENSION_NAME,
190 VK_KHR_SWAPCHAIN_EXTENSION_NAME,
191 // Below are all platform specific extensions. The name macros like we use above are
192 // all defined in platform specific vulkan headers. We currently don't include these
193 // headers as they are a little bit of a pain (e.g. windows headers requires including
194 // <windows.h> which causes all sorts of fun annoyances/problems. So instead we are
195 // just listing the strings these macros are defined to. This really shouldn't cause
196 // any long term issues as the chances of the strings connected to the name macros
197 // changing is next to zero.
198 "VK_KHR_win32_surface", // VK_KHR_WIN32_SURFACE_EXTENSION_NAME
199 "VK_KHR_xcb_surface", // VK_KHR_XCB_SURFACE_EXTENSION_NAME,
200 "VK_ANDROID_external_memory_android_hardware_buffer",
201 // VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
202 "VK_KHR_android_surface", // VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
203 };
204
205 for (size_t i = 0; i < std::size(kValidExtensions); ++i) {
206 if (!strcmp(extensionName, kValidExtensions[i])) {
207 return true;
208 }
209 }
210 return false;
211 }
212
init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,uint32_t specVersion,TArray<VkExtensionProperties> * instanceExtensions,TArray<VkLayerProperties> * instanceLayers)213 static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
214 uint32_t specVersion,
215 TArray<VkExtensionProperties>* instanceExtensions,
216 TArray<VkLayerProperties>* instanceLayers) {
217 if (getInstProc == nullptr) {
218 return false;
219 }
220
221 ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
222 ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);
223
224 VkResult res;
225 uint32_t layerCount = 0;
226 #ifdef SK_ENABLE_VK_LAYERS
227 // instance layers
228 res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
229 if (VK_SUCCESS != res) {
230 return false;
231 }
232 VkLayerProperties* layers = new VkLayerProperties[layerCount];
233 res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
234 if (VK_SUCCESS != res) {
235 delete[] layers;
236 return false;
237 }
238
239 uint32_t nonPatchVersion = remove_patch_version(specVersion);
240 for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
241 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
242 nonPatchVersion);
243 if (idx != -1) {
244 instanceLayers->push_back() = layers[idx];
245 }
246 }
247 delete[] layers;
248 #endif
249
250 // instance extensions
251 // via Vulkan implementation and implicitly enabled layers
252 {
253 uint32_t extensionCount = 0;
254 res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
255 if (VK_SUCCESS != res) {
256 return false;
257 }
258 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
259 res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
260 if (VK_SUCCESS != res) {
261 delete[] extensions;
262 return false;
263 }
264 for (uint32_t i = 0; i < extensionCount; ++i) {
265 if (should_include_extension(extensions[i].extensionName)) {
266 instanceExtensions->push_back() = extensions[i];
267 }
268 }
269 delete [] extensions;
270 }
271
272 // via explicitly enabled layers
273 layerCount = instanceLayers->size();
274 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
275 uint32_t extensionCount = 0;
276 res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
277 &extensionCount, nullptr);
278 if (VK_SUCCESS != res) {
279 return false;
280 }
281 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
282 res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
283 &extensionCount, extensions);
284 if (VK_SUCCESS != res) {
285 delete[] extensions;
286 return false;
287 }
288 for (uint32_t i = 0; i < extensionCount; ++i) {
289 if (should_include_extension(extensions[i].extensionName)) {
290 instanceExtensions->push_back() = extensions[i];
291 }
292 }
293 delete[] extensions;
294 }
295
296 return true;
297 }
298
// Declares a local PFN_vk##F named F, resolved via the getProc functor in the
// enclosing scope. Unlike ACQUIRE_*, the result is NOT null-checked here.
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
300
init_device_extensions_and_layers(const skgpu::VulkanGetProc & getProc,uint32_t specVersion,VkInstance inst,VkPhysicalDevice physDev,TArray<VkExtensionProperties> * deviceExtensions,TArray<VkLayerProperties> * deviceLayers)301 static bool init_device_extensions_and_layers(const skgpu::VulkanGetProc& getProc,
302 uint32_t specVersion, VkInstance inst,
303 VkPhysicalDevice physDev,
304 TArray<VkExtensionProperties>* deviceExtensions,
305 TArray<VkLayerProperties>* deviceLayers) {
306 if (getProc == nullptr) {
307 return false;
308 }
309
310 GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
311 GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
312
313 if (!EnumerateDeviceExtensionProperties ||
314 !EnumerateDeviceLayerProperties) {
315 return false;
316 }
317
318 VkResult res;
319 // device layers
320 uint32_t layerCount = 0;
321 #ifdef SK_ENABLE_VK_LAYERS
322 res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
323 if (VK_SUCCESS != res) {
324 return false;
325 }
326 VkLayerProperties* layers = new VkLayerProperties[layerCount];
327 res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
328 if (VK_SUCCESS != res) {
329 delete[] layers;
330 return false;
331 }
332
333 uint32_t nonPatchVersion = remove_patch_version(specVersion);
334 for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
335 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
336 nonPatchVersion);
337 if (idx != -1) {
338 deviceLayers->push_back() = layers[idx];
339 }
340 }
341 delete[] layers;
342 #endif
343
344 // device extensions
345 // via Vulkan implementation and implicitly enabled layers
346 {
347 uint32_t extensionCount = 0;
348 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
349 if (VK_SUCCESS != res) {
350 return false;
351 }
352 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
353 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
354 if (VK_SUCCESS != res) {
355 delete[] extensions;
356 return false;
357 }
358 for (uint32_t i = 0; i < extensionCount; ++i) {
359 if (should_include_extension(extensions[i].extensionName)) {
360 deviceExtensions->push_back() = extensions[i];
361 }
362 }
363 delete[] extensions;
364 }
365
366 // via explicitly enabled layers
367 layerCount = deviceLayers->size();
368 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
369 uint32_t extensionCount = 0;
370 res = EnumerateDeviceExtensionProperties(physDev,
371 (*deviceLayers)[layerIndex].layerName,
372 &extensionCount, nullptr);
373 if (VK_SUCCESS != res) {
374 return false;
375 }
376 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
377 res = EnumerateDeviceExtensionProperties(physDev,
378 (*deviceLayers)[layerIndex].layerName,
379 &extensionCount, extensions);
380 if (VK_SUCCESS != res) {
381 delete[] extensions;
382 return false;
383 }
384 for (uint32_t i = 0; i < extensionCount; ++i) {
385 if (should_include_extension(extensions[i].extensionName)) {
386 deviceExtensions->push_back() = extensions[i];
387 }
388 }
389 delete[] extensions;
390 }
391
392 return true;
393 }
394
// Resolves vk##name via getInstProc without any null check; caller must test
// grVk##name itself before use.
#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))

// Resolves vk##name via getInstProc; on failure logs, tears down the instance
// (if one exists), and returns false from the enclosing function. Expects
// getInstProc, inst, debugCallback and hasDebugExtension in scope.
#define ACQUIRE_VK_INST_PROC(name, instance) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// Resolves vk##name via the getProc functor without any null check.
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// Like ACQUIRE_VK_INST_PROC but resolves through the getProc functor (which
// can target either an instance or a device).
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// Resolves vk##name via getProc; on failure just logs and returns false
// without touching the instance (for helpers that don't own it).
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
436
// Tears down the debug-report callback (if one was installed) and then the
// VkInstance itself, clearing *debugCallback. Returns false only if a destroy
// entry point cannot be resolved (via the ACQUIRE macro's early return).
static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
    grVkDestroyInstance(inst, nullptr);
    return true;
}
449
// Builds the VkPhysicalDeviceFeatures2::pNext chain of extension feature
// structs we may use, then queries the device to fill them in. Each struct is
// heap-allocated with sk_malloc_throw and linked onto features->pNext; the
// chain is expected to be released later via FreeVulkanFeaturesStructs.
// Returns false if |isProtected| was requested but the device lacks protected
// memory support. NOTE(review): on that failure path the already-allocated
// structs remain linked on features->pNext — presumably the caller still frees
// the chain; confirm against callers of CreateVkBackendContext.
static bool setup_features(const skgpu::VulkanGetProc& getProc, VkInstance inst,
                           VkPhysicalDevice physDev, uint32_t physDeviceVersion,
                           skgpu::VulkanExtensions* extensions, VkPhysicalDeviceFeatures2* features,
                           bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    // tailPNext always points at the pNext slot of the last struct in the chain.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
            (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    // Advanced blend features, if the extension is available.
    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
            sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // YCbCr sampler conversion: core in 1.1, otherwise via extension.
    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    // Query the device to populate the whole chain: core entry point on 1.1+,
    // otherwise the KHR alias from VK_KHR_get_physical_device_properties2.
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    // If we want to disable any extension features do so here.
    return true;
}
514
// Creates a full Vulkan context for testing: instance (with optional debug
// layers/callback), physical device selection, queue discovery, logical
// device, and memory allocator, packaging everything into |ctx|.
//
// |getInstProc|          loader entry point from LoadVkLibraryAndGetProcAddrFuncs.
// |ctx|                  out: the assembled backend context.
// |extensions|           out: initialized with the enabled instance/device extensions.
// |features|             out: zeroed, then filled (with a pNext chain) via setup_features
//                        when VkPhysicalDeviceFeatures2 querying is available.
// |debugCallback|        out: debug-report callback handle (only written under
//                        SK_ENABLE_VK_LAYERS).
// |presentQueueIndexPtr| optional out: present-capable queue family, found via |canPresent|.
// |canPresent|           optional predicate for present support on a queue family.
// |isProtected|          request a protected-memory queue/device (requires Vulkan 1.1).
// Returns false on any failure, cleaning up the instance where one was created.
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            skgpu::VulkanBackendContext* ctx,
                            skgpu::VulkanExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            const CanPresentFn& canPresent,
                            bool isProtected) {
    VkResult err;

    // vkEnumerateInstanceVersion is absent on pure 1.0 loaders, hence NOCHECK.
    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    STArray<2, VkPhysicalDevice> physDevs;
    VkDevice device;
    VkInstance inst = VK_NULL_HANDLE;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    TArray<VkLayerProperties> instanceLayers;
    TArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    // Flatten the property structs into the name arrays vkCreateInstance wants.
    TArray<const char*> instanceLayerNames;
    TArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.size(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.size(); ++i) {
        instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.size(),      // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.size(),  // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

    // getProc prefers device-level dispatch when a device is supplied,
    // otherwise falls back to instance-level lookup.
    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
                                                        VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return grVkGetDeviceProcAddr(device, proc_name);
        }
        return getInstProc(instance, proc_name);
    };

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.size() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Allocate enough storage for all available physical devices. We should be able to just ask for
    // the first one, but a bug in RenderDoc (https://github.com/baldurk/renderdoc/issues/2766)
    // will smash the stack if we do that.
    physDevs.resize(gpuCount);
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, physDevs.data());
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // We just use the first physical device.
    // TODO: find best match for our needs
    VkPhysicalDevice physDev = physDevs.front();

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up make a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    TArray<VkLayerProperties> deviceLayers;
    TArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    TArray<const char*> deviceLayerNames;
    TArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.size(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    for (int i = 0; i < deviceExtensions.size(); ++i) {
        deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.size(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.size(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities

        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    // Only need a second VkDeviceQueueCreateInfo when present and graphics live
    // in different queue families.
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.size(),          // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.size(),      // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        // Protected queues must be retrieved via vkGetDeviceQueue2 with the
        // protected flag set.
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,          // sType
            nullptr,                                        // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,           // flags
            graphicsQueueIndex,                             // queueFamilyIndex
            0                                               // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    skgpu::VulkanInterface interface = skgpu::VulkanInterface(
            getProc, inst, device, instanceVersion, physDeviceVersion, extensions);
    SkASSERT(interface.validate(instanceVersion, physDeviceVersion, extensions));

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = VkTestMemoryAllocator::Make(
            inst, physDev, device, physDeviceVersion, extensions, &interface);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fProtectedContext = skgpu::Protected(isProtected);
    ctx->fMemoryAllocator = memoryAllocator;

    return true;
}
874
FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2 * features)875 void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
876 // All Vulkan structs that could be part of the features chain will start with the
877 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
878 // so we can get access to the pNext for the next struct.
879 struct CommonVulkanHeader {
880 VkStructureType sType;
881 void* pNext;
882 };
883
884 void* pNext = features->pNext;
885 while (pNext) {
886 void* current = pNext;
887 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
888 sk_free(current);
889 }
890 }
891
892 } // namespace sk_gpu_test
893
894 #endif
895