xref: /aosp_15_r20/frameworks/base/libs/hwui/renderthread/VulkanManager.cpp (revision d57664e9bc4670b3ecf6748a746a57c557b6bc9e)
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <android/sync.h>
#include <gui/TraceUtils.h>
#include <include/gpu/ganesh/GrBackendSemaphore.h>
#include <include/gpu/ganesh/GrBackendSurface.h>
#include <include/gpu/ganesh/GrDirectContext.h>
#include <include/gpu/ganesh/GrTypes.h>
#include <include/gpu/ganesh/SkSurfaceGanesh.h>
#include <include/gpu/ganesh/vk/GrVkBackendSemaphore.h>
#include <include/gpu/ganesh/vk/GrVkBackendSurface.h>
#include <include/gpu/ganesh/vk/GrVkDirectContext.h>
#include <include/gpu/ganesh/vk/GrVkTypes.h>
#include <include/gpu/vk/VulkanBackendContext.h>
#include <ui/FatVector.h>

#include <sstream>

#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

namespace android {
namespace uirenderer {
namespace renderthread {

// Not all of these are strictly required, but are all enabled if present.
static std::array<std::string_view, 23> sEnableExtensions{
        VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
        VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
        VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
        VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE1_EXTENSION_NAME,
        VK_KHR_MAINTENANCE2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE3_EXTENSION_NAME,
        VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
        VK_KHR_SURFACE_EXTENSION_NAME,
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
        VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
        VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
        VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
        VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
        VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
        VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
        VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
        VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME,
        VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME,
        VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME,
        VK_EXT_DEVICE_FAULT_EXTENSION_NAME,
};

static bool shouldEnableExtension(const std::string_view& extension) {
    for (const auto& it : sEnableExtensions) {
        if (it == extension) {
            return true;
        }
    }
    return false;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
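
// For illustration, GET_INST_PROC(CreateDevice) expands to:
//   mCreateDevice = (PFN_vkCreateDevice)vkGetInstanceProcAddr(mInstance, "vkCreateDevice");
// GET_PROC resolves loader-level entry points (no instance yet), GET_INST_PROC resolves
// instance-level entry points, and GET_DEV_PROC resolves device-level entry points.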

// Cache a weak pointer to the VulkanManager so a second thread can share the same
// Vulkan state.
static wp<VulkanManager> sWeakInstance = nullptr;
static std::mutex sLock;

sp<VulkanManager> VulkanManager::getInstance() {
    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}
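
// Illustrative usage (hypothetical caller, not part of this file):
//   sp<VulkanManager> manager = VulkanManager::getInstance();
//   manager->initialize();  // safe to call repeatedly; guarded by std::call_once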

sp<VulkanManager> VulkanManager::peekInstance() {
    std::lock_guard _lock{sLock};
    return sWeakInstance.promote();
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mAHBUploadQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(skgpu::VulkanExtensions& grExtensions,
                                VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling instance extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling instance extension %s", extension.extensionName);
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties2);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device rather than the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount = 0;
    mGetPhysicalDeviceQueueFamilyProperties2(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties2[]>
            queueProps(new VkQueueFamilyProperties2[queueCount]);
    // Query the global priority; this is ignored if VK_EXT_global_priority isn't supported.
    std::vector<VkQueueFamilyGlobalPriorityPropertiesEXT> queuePriorityProps(queueCount);
    for (uint32_t i = 0; i < queueCount; i++) {
        queuePriorityProps[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT;
        queuePriorityProps[i].pNext = nullptr;
        // The spec requires sType to be initialized on each output struct as well.
        queueProps[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2;
        queueProps[i].pNext = &queuePriorityProps[i];
    }
    mGetPhysicalDeviceQueueFamilyProperties2(mPhysicalDevice, &queueCount, queueProps.get());

    constexpr auto kRequestedQueueCount = 2;
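    // Two queues are requested from the graphics family: initialize() binds queue
    // index 0 as mGraphicsQueue for RenderThread drawing and queue index 1 as
    // mAHBUploadQueue for AHardwareBuffer uploads.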

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFamilyProperties.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            LOG_ALWAYS_FATAL_IF(
                    queueProps[i].queueFamilyProperties.queueCount < kRequestedQueueCount);
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling device extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling device extension %s", extension.extensionName);
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Set up all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;
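    // Each feature struct below is appended to the chain by writing its address
    // through *tailPNext and then advancing tailPNext to that struct's own pNext
    // member, i.e.:
    //   *tailPNext = newStruct;         // link the new struct into the chain
    //   tailPNext = &newStruct->pNext;  // the tail is now the new struct's pNext
    // free_features_extensions_structs() later walks and frees this chain.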

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    if (grExtensions.hasExtension(VK_EXT_DEVICE_FAULT_EXTENSION_NAME, 1)) {
        // Allocated with malloc rather than new: free_features_extensions_structs()
        // releases every node of this chain with free().
        VkPhysicalDeviceFaultFeaturesEXT* deviceFaultFeatures =
                (VkPhysicalDeviceFaultFeaturesEXT*)malloc(
                        sizeof(VkPhysicalDeviceFaultFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!deviceFaultFeatures);
        deviceFaultFeatures->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT;
        deviceFaultFeatures->pNext = nullptr;
        *tailPNext = deviceFaultFeatures;
        tailPNext = &deviceFaultFeatures->pNext;
    }

    if (grExtensions.hasExtension(VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME, 1)) {
        VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT* formatFeatures =
                (VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT*)malloc(
                        sizeof(VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!formatFeatures);
        formatFeatures->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT;
        formatFeatures->pNext = nullptr;
        *tailPNext = formatFeatures;
        tailPNext = &formatFeatures->pNext;
    }

    VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT* globalPriorityQueryFeatures =
            (VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT*)malloc(
                    sizeof(VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT));
    LOG_ALWAYS_FATAL_IF(!globalPriorityQueryFeatures);
    globalPriorityQueryFeatures->sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT;
    globalPriorityQueryFeatures->pNext = nullptr;
    globalPriorityQueryFeatures->globalPriorityQuery = false;
    *tailPNext = globalPriorityQueryFeatures;
    tailPNext = &globalPriorityQueryFeatures->pNext;

    // query the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we
    // can't depend on it being supported on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[kRequestedQueueCount] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        VkQueueGlobalPriorityEXT globalPriority;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }

        // check if the requested priority is reported by the query
        bool attachGlobalPriority = false;
        if (uirenderer::Properties::queryGlobalPriority &&
            globalPriorityQueryFeatures->globalPriorityQuery) {
            for (uint32_t i = 0; i < queuePriorityProps[mGraphicsQueueIndex].priorityCount; i++) {
                if (queuePriorityProps[mGraphicsQueueIndex].priorities[i] == globalPriority) {
                    attachGlobalPriority = true;
                    break;
                }
            }
        } else {
            // Querying is not supported, so attempt queue creation with the requested
            // priority anyway. If the priority turns out not to be supported, the
            // driver *may* fail with VK_ERROR_NOT_PERMITTED_KHR.
            attachGlobalPriority = true;
        }

        if (attachGlobalPriority) {
            memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
            queuePriorityCreateInfo.sType =
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
            queuePriorityCreateInfo.pNext = nullptr;
            queuePriorityCreateInfo.globalPriority = globalPriority;
            queueNextPtr = &queuePriorityCreateInfo;
        } else {
            // If globalPriorityQuery is enabled, attempting queue creation with an unsupported
            // priority will return VK_ERROR_INITIALIZATION_FAILED.
            //
            // SysUI and Launcher will request HIGH when SF has RT but it is a known issue that
            // upstream drm drivers currently lack a way to grant them the granular privileges
            // they need for HIGH (but not RT) so they will fail queue creation.
            // For now, drop the unsupported global priority request so that queue creation
            // succeeds.
            //
            // Once that is fixed, this should probably be a fatal error indicating an improper
            // request or an app needs to get the correct privileges.
            ALOGW("Requested context priority is not supported by the queue");
        }
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            kRequestedQueueCount,                        // queueCount
            queuePriorities,                             // pQueuePriorities
    };

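    // The full VkPhysicalDeviceFeatures2 chain built above is passed to the driver
    // through VkDeviceCreateInfo::pNext; per the Vulkan spec, pEnabledFeatures must
    // then be left null.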
    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // pEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::call_once(mInitFlag, [&] {
        GET_PROC(EnumerateInstanceVersion);
        uint32_t instanceVersion;
        LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
        LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

        this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

        mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
        mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 1, &mAHBUploadQueue);

        if (Properties::enablePartialUpdates && Properties::useBufferAge) {
            mSwapBehavior = SwapBehavior::BufferAge;
        }

        mInitialized = true;
    });
}

namespace {
void onVkDeviceFault(const std::string& contextLabel, const std::string& description,
                     const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                     const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                     const std::vector<std::byte>& vendorBinaryData) {
    // The final crash string should contain as much differentiating info as possible, up to 1024
    // bytes. As this final message is constructed, the same information is also dumped to the logs
    // but in a more verbose format. Building the crash string is unsightly, so the clearer logging
    // statement is always placed first to give context.
    ALOGE("VK_ERROR_DEVICE_LOST (%s context): %s", contextLabel.c_str(), description.c_str());
    std::stringstream crashMsg;
    crashMsg << "VK_ERROR_DEVICE_LOST (" << contextLabel;

    if (!addressInfos.empty()) {
        ALOGE("%zu VkDeviceFaultAddressInfoEXT:", addressInfos.size());
        crashMsg << ", " << addressInfos.size() << " address info (";
        for (VkDeviceFaultAddressInfoEXT addressInfo : addressInfos) {
            ALOGE(" addressType:       %d", (int)addressInfo.addressType);
            ALOGE("  reportedAddress:  %" PRIu64, addressInfo.reportedAddress);
            ALOGE("  addressPrecision: %" PRIu64, addressInfo.addressPrecision);
            crashMsg << addressInfo.addressType << ":"
                     << addressInfo.reportedAddress << ":"
                     << addressInfo.addressPrecision << ", ";
        }
        crashMsg.seekp(-2, crashMsg.cur);  // Move back to overwrite trailing ", "
        crashMsg << ")";
    }

    if (!vendorInfos.empty()) {
        ALOGE("%zu VkDeviceFaultVendorInfoEXT:", vendorInfos.size());
        crashMsg << ", " << vendorInfos.size() << " vendor info (";
        for (VkDeviceFaultVendorInfoEXT vendorInfo : vendorInfos) {
            ALOGE(" description:      %s", vendorInfo.description);
            ALOGE("  vendorFaultCode: %" PRIu64, vendorInfo.vendorFaultCode);
            ALOGE("  vendorFaultData: %" PRIu64, vendorInfo.vendorFaultData);
            // Omit descriptions for individual vendor info structs in the crash string, as the
            // fault code and fault data fields should be enough for clustering, and the verbosity
            // isn't worth it. Additionally, vendors may just set the general description field of
            // the overall fault to the description of the first element in this list, and that
            // overall description will be placed at the end of the crash string.
            crashMsg << vendorInfo.vendorFaultCode << ":"
                     << vendorInfo.vendorFaultData << ", ";
        }
        crashMsg.seekp(-2, crashMsg.cur);  // Move back to overwrite trailing ", "
        crashMsg << ")";
    }

    if (!vendorBinaryData.empty()) {
        // TODO: b/322830575 - Log in base64, or dump directly to a file that gets put in bugreports
        ALOGE("%zu bytes of vendor-specific binary data (please notify Android's Core Graphics"
              " Stack team if you observe this message).",
              vendorBinaryData.size());
        crashMsg << ", " << vendorBinaryData.size() << " bytes binary";
    }

    crashMsg << "): " << description;
    LOG_ALWAYS_FATAL("%s", crashMsg.str().c_str());
}

void deviceLostProcRenderThread(void* callbackContext, const std::string& description,
                                const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                                const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                                const std::vector<std::byte>& vendorBinaryData) {
    onVkDeviceFault("RenderThread", description, addressInfos, vendorInfos, vendorBinaryData);
}
void deviceLostProcUploadThread(void* callbackContext, const std::string& description,
                                const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                                const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                                const std::vector<std::byte>& vendorBinaryData) {
    onVkDeviceFault("UploadThread", description, addressInfos, vendorInfos, vendorBinaryData);
}
}  // anonymous namespace

static void onGrContextReleased(void* context) {
    VulkanManager* manager = (VulkanManager*)context;
    manager->decStrong((void*)onGrContextReleased);
}

sk_sp<GrDirectContext> VulkanManager::createContext(GrContextOptions& options,
                                                    ContextType contextType) {
    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
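    // This lambda matches the getProc signature Skia expects (skgpu::VulkanGetProc):
    // Skia passes a real VkDevice when it wants a device-level entry point and
    // VK_NULL_HANDLE when an instance-level one is sufficient.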

    skgpu::VulkanBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue =
            (contextType == ContextType::kRenderThread) ? mGraphicsQueue : mAHBUploadQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);
    backendContext.fDeviceLostContext = nullptr;
    backendContext.fDeviceLostProc = (contextType == ContextType::kRenderThread)
                                             ? deviceLostProcRenderThread
                                             : deviceLostProcUploadThread;

    LOG_ALWAYS_FATAL_IF(options.fContextDeleteProc != nullptr, "Conflicting fContextDeleteProcs!");
    this->incStrong((void*)onGrContextReleased);
    options.fContextDeleteContext = this;
    options.fContextDeleteProc = onGrContextReleased;

    return GrDirectContexts::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
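            // sync_file_info::status is 1 once the fence has signaled, 0 while it
            // is still pending, and negative on error; treat anything other than
            // "signaled" as a pending signal the GPU must wait for.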
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore beSemaphore = GrBackendSemaphores::MakeVk(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &beSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why beSemaphore is not working.
                        skgpu::ganesh::FlushAndSubmit(bufferInfo->skSurface.get());
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

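// Reference-counted wrapper that owns a VkSemaphore and destroys it through the
// cached vkDestroySemaphore proc once the last reference is dropped; references
// are shared between HWUI and Skia's GrFlushInfo finished-proc callback.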
class SharedSemaphoreInfo : public LightRefBase<SharedSemaphoreInfo> {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    GrBackendSemaphore mGrBackendSemaphore;

    SharedSemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                        VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {
        mGrBackendSemaphore = GrBackendSemaphores::MakeVk(mSemaphore);
    }

    ~SharedSemaphoreInfo() { mDestroyFunction(mDevice, mSemaphore, nullptr); }

    friend class LightRefBase<SharedSemaphoreInfo>;
    friend class sp<SharedSemaphoreInfo>;

public:
    VkSemaphore semaphore() const { return mSemaphore; }

    GrBackendSemaphore* grBackendSemaphore() { return &mGrBackendSemaphore; }
};

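// Called by Skia once the GPU work that signals the semaphore has finished;
// balances the incStrong() performed before the SharedSemaphoreInfo is handed
// to GrFlushInfo::fFinishedContext.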
static void destroy_semaphore(void* context) {
    SharedSemaphoreInfo* info = reinterpret_cast<SharedSemaphoreInfo*>(context);
    info->decStrong(0);
}

VulkanManager::VkDrawResult VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");

    sp<SharedSemaphoreInfo> sharedSemaphore;
    GrFlushInfo flushInfo;

    {
        VkExportSemaphoreCreateInfo exportInfo;
        exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
        exportInfo.pNext = nullptr;
        exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkSemaphoreCreateInfo semaphoreInfo;
        semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        semaphoreInfo.pNext = &exportInfo;
        semaphoreInfo.flags = 0;
        VkSemaphore semaphore;
        VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
        ALOGE_IF(VK_SUCCESS != err,
                 "VulkanManager::finishFrame(): Failed to create semaphore");

        if (err == VK_SUCCESS) {
            sharedSemaphore = sp<SharedSemaphoreInfo>::make(mDestroySemaphore, mDevice, semaphore);
            flushInfo.fNumSemaphores = 1;
            flushInfo.fSignalSemaphores = sharedSemaphore->grBackendSemaphore();
            flushInfo.fFinishedProc = destroy_semaphore;
            sharedSemaphore->incStrong(0);
            flushInfo.fFinishedContext = sharedSemaphore.get();
        }
    }

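    // flush() with kPresent transitions the surface for presentation and asks Skia
    // to signal the export semaphore created above; submit() then hands the recorded
    // work to the graphics queue.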
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    GrSemaphoresSubmitted submitted = context->flush(
            surface, SkSurfaces::BackendSurfaceAccess::kPresent, flushInfo);
    context->submit();
    VkDrawResult drawResult{
            .submissionTime = systemTime(),
    };
    if (sharedSemaphore) {
        if (submitted == GrSemaphoresSubmitted::kYes && mFrameBoundaryANDROID) {
            // retrieve VkImage used as render target
            VkImage image = VK_NULL_HANDLE;
            GrBackendRenderTarget backendRenderTarget = SkSurfaces::GetBackendRenderTarget(
                    surface, SkSurfaces::BackendHandleAccess::kFlushRead);
            if (backendRenderTarget.isValid()) {
                GrVkImageInfo info;
                if (GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info)) {
                    image = info.fImage;
                } else {
                    ALOGE("Frame boundary: backend is not vulkan");
                }
            } else {
                ALOGE("Frame boundary: invalid backend render target");
            }
            // frameBoundaryANDROID needs to know about mSwapSemaphore, but
            // it won't wait on it.
            mFrameBoundaryANDROID(mDevice, sharedSemaphore->semaphore(), image);
        }
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = sharedSemaphore->semaphore();
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        int fenceFd = -1;
        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to get semaphore Fd");
        drawResult.presentFence.reset(fenceFd);
    } else {
        ALOGE("VulkanManager::finishFrame(): Semaphore submission failed");
        mQueueWaitIdle(mGraphicsQueue);
    }

    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);

    return drawResult;
}


void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect,
                                android::base::unique_fd&& presentFence) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    surface->presentCurrentBuffer(dirtyRect, presentFence.release());
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        mQueueWaitIdle(mGraphicsQueue);
    }

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore = GrBackendSemaphores::MakeVk(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    auto sharedSemaphore = sp<SharedSemaphoreInfo>::make(mDestroySemaphore, mDevice, semaphore);

    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore callback
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = sharedSemaphore->grBackendSemaphore();
    flushInfo.fFinishedProc = destroy_semaphore;
    sharedSemaphore->incStrong(0);
    flushInfo.fFinishedContext = sharedSemaphore.get();
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */