/*
 * Copyright 2018 Google
 * SPDX-License-Identifier: MIT
 */

#include "ResourceTracker.h"

#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "gfxstream_vk_private.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "util/anon_file.h"
#include "util/macros.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vulkan/vulkan_core.h"

#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#include <vndk/hardware_buffer.h>
#endif
#include <stdlib.h>

#include <algorithm>
#include <chrono>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_struct_id.h"
#include "vk_util.h"

#if defined(__linux__)
#include <drm_fourcc.h>
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif

static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

namespace gfxstream {
namespace vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl)       \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                       \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_impl;                                                                              \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,             \
                                      size_t count) override {                                     \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_to_u64_impl;                                                                       \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) \
        override {                                                                                 \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_from_u64_impl;                                                                     \
        }                                                                                          \
    }

#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                          \
    MAKE_HANDLE_MAPPING_FOREACH(                                         \
        type_name, handles[i] = get_host_##type_name(handles[i]),        \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
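
// Illustrative sketch (not literal generated code): for a handle type such as
// VkSampler, CREATE_MAPPING_IMPL_FOR_TYPE produces roughly the following
// override inside the mapping class:
//
//   void mapHandles_VkSampler(VkSampler* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) {
//           handles[i] = new_from_host_VkSampler(handles[i]);
//           ResourceTracker::get()->register_VkSampler(handles[i]);
//       }
//   }
//
// plus the corresponding _u64 variants used when handles travel over the
// 64-bit wire encoding.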

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    std::mutex mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;
    /// \brief Sets the alloc and free callbacks used by CommandBufferStagingStream
    /// instances for memory allocation.
    /// \param allocFn callback used to allocate memory
    /// \param freeFn callback used to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = allocFn;
        mFree = freeFn;
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        std::lock_guard<std::mutex> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        std::lock_guard<std::mutex> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // If custom allocators are provided, forward them to CommandBufferStagingStream.
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
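
// A minimal usage sketch for the staging pool above (hypothetical caller
// code): check out a stream/encoder pair, record through the encoder, then
// return the pair for reuse.
//
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands via `encoder`, which writes into `stream` ...
//   sStaging.pushStaging(stream, encoder);  // reset() and recycle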

struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};

#define HANDLE_REGISTER_IMPL_IMPL(type)                    \
    void ResourceTracker::register_##type(type obj) {      \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type[obj] = type##_Info();                  \
    }

#define HANDLE_UNREGISTER_IMPL_IMPL(type)                  \
    void ResourceTracker::unregister_##type(type obj) {    \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type.erase(obj);                            \
    }

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
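
// Sketch of the generated trackers, assuming VkShaderModule appears in the
// trivial handle-type list:
//
//   void ResourceTracker::register_VkShaderModule(VkShaderModule obj) {
//       std::lock_guard<std::recursive_mutex> lock(mLock);
//       info_VkShaderModule[obj] = VkShaderModule_Info();
//   }
//   void ResourceTracker::unregister_VkShaderModule(VkShaderModule obj) {
//       std::lock_guard<std::recursive_mutex> lock(mLock);
//       info_VkShaderModule.erase(obj);
//   }
//
// Non-trivial types (VkDeviceMemory, VkFence, etc.) get hand-written
// unregister_* definitions further below.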

uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}

bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
           VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}

VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo res = *pImageInfo;

    if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
        descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        return res;

    bool immutableSampler =
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

    if (!immutableSampler) return res;

    res.sampler = 0;

    return res;
}

bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
    return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}

static bool isHostVisible(const VkPhysicalDeviceMemoryProperties* memoryProps, uint32_t index) {
    return memoryProps->memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}

VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkSampler sampler = inputInfo.sampler;

    VkDescriptorImageInfo res = inputInfo;

    if (sampler) {
        auto it = info_VkSampler.find(sampler);
        bool samplerExists = it != info_VkSampler.end();
        if (!samplerExists) res.sampler = 0;
    }

    return res;
}

void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
                                             VkDeviceMemoryReportEventTypeEXT type,
                                             uint64_t memoryObjectId, VkDeviceSize size,
                                             VkObjectType objectType, uint64_t objectHandle,
                                             uint32_t heapIndex) {
    if (info.deviceMemoryReportCallbacks.empty()) return;

    const VkDeviceMemoryReportCallbackDataEXT callbackData = {
        VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
        nullptr,                                                   // pNext
        0,                                                         // flags
        type,                                                      // type
        memoryObjectId,                                            // memoryObjectId
        size,                                                      // size
        objectType,                                                // objectType
        objectHandle,                                              // objectHandle
        heapIndex,                                                 // heapIndex
    };
    for (const auto& callback : info.deviceMemoryReportCallbacks) {
        callback.first(&callbackData, callback.second);
    }
}
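
// Caller-side sketch (hypothetical application code) of a callback this loop
// invokes; it must match PFN_vkDeviceMemoryReportCallbackEXT from
// VK_EXT_device_memory_report:
//
//   static void VKAPI_PTR onDeviceMemoryEvent(
//       const VkDeviceMemoryReportCallbackDataEXT* pCallbackData, void* pUserData) {
//       // e.g. record pCallbackData->type, ->size, and ->memoryObjectId
//   }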

#ifdef VK_USE_PLATFORM_FUCHSIA
inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
    size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    if (minBufferCountForDedicatedSlack) {
        constraints.min_buffer_count_for_dedicated_slack = minBufferCountForDedicatedSlack;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain or heaps.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}

uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}
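
// For reference, SetUsageBit(COLOR_ATTACHMENT, ColorAttachment) above expands
// to:
//
//   if (imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
//       usage |= fuchsia_sysmem::wire::kVulkanImageUsageColorAttachment;
//   }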

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}

static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}

static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}

static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}

// TODO(fxbug.dev/42172354): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // Shallow copy; uses pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}
538 
addImageBufferCollectionConstraintsFUCHSIA(VkEncoder * enc,VkDevice device,VkPhysicalDevice physicalDevice,const VkImageFormatConstraintsInfoFUCHSIA * formatConstraints,VkImageTiling tiling,fuchsia_sysmem::wire::BufferCollectionConstraints * constraints)539 VkResult addImageBufferCollectionConstraintsFUCHSIA(
540     VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
541     const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,  // always non-zero
542     VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
543     // First check if the format, tiling and usage is supported on host.
544     VkImageFormatProperties imageFormatProperties;
545     auto createInfo = &formatConstraints->imageCreateInfo;
546     auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
547         physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
548         createInfo->flags, &imageFormatProperties, true /* do lock */);
549     if (result != VK_SUCCESS) {
550         mesa_logd(
551             "%s: Image format (%u) type (%u) tiling (%u) "
552             "usage (%u) flags (%u) not supported by physical "
553             "device",
554             __func__, static_cast<uint32_t>(createInfo->format),
555             static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
556             static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
557         return VK_ERROR_FORMAT_NOT_SUPPORTED;
558     }
559 
560     // Check if format constraints contains unsupported format features.
561     {
562         VkFormatProperties formatProperties;
563         enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
564                                                  &formatProperties, true /* do lock */);
565 
566         auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
567                                      ? formatProperties.linearTilingFeatures
568                                      : formatProperties.optimalTilingFeatures;
569         auto requiredFeatures = formatConstraints->requiredFormatFeatures;
        if ((~supportedFeatures) & requiredFeatures) {
            mesa_logd(
                "%s: Host device supported features for %s tiling: %08x, "
                "required features: %08x, feature bits %08x missing",
                __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
                static_cast<uint32_t>(supportedFeatures), static_cast<uint32_t>(requiredFeatures),
                static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
    if (formatConstraints->sysmemPixelFormat != 0) {
        auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
            formatConstraints->sysmemPixelFormat);
        if (createInfo->format != VK_FORMAT_UNDEFINED &&
            !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
            mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
                      static_cast<uint32_t>(createInfo->format),
                      formatConstraints->sysmemPixelFormat);
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixelFormat;
    } else {
        auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
        if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
            mesa_logd("%s: Unsupported VkFormat %u", __func__,
                      static_cast<uint32_t>(createInfo->format));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixel_format;
    }

    imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
    for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
        imageConstraints.color_space[i].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
    }

    // Get row alignment from the host GPU.
    VkDeviceSize offset = 0;
    VkDeviceSize rowPitchAlignment = 1u;

    if (tiling == VK_IMAGE_TILING_LINEAR) {
        VkImageCreateInfo createInfoDup = *createInfo;
        createInfoDup.pNext = nullptr;
        enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
                                           true /* do lock */);
        mesa_logd(
            "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
            "rowPitchAlignment = %lu",
            (int)createInfo->format, offset, rowPitchAlignment);
    }

    imageConstraints.min_coded_width = createInfo->extent.width;
    imageConstraints.max_coded_width = 0xfffffff;
    imageConstraints.min_coded_height = createInfo->extent.height;
    imageConstraints.max_coded_height = 0xffffffff;
    // min_bytes_per_row can be calculated by sysmem using
    // |min_coded_width|, |bytes_per_row_divisor| and the color format.
    imageConstraints.min_bytes_per_row = 0;
    imageConstraints.max_bytes_per_row = 0xffffffff;
    imageConstraints.max_coded_width_times_coded_height = 0xffffffff;

    imageConstraints.layers = 1;
    imageConstraints.coded_width_divisor = 1;
    imageConstraints.coded_height_divisor = 1;
    imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
    imageConstraints.start_offset_divisor = 1;
    imageConstraints.display_width_divisor = 1;
    imageConstraints.display_height_divisor = 1;
    imageConstraints.pixel_format.has_format_modifier = true;
    imageConstraints.pixel_format.format_modifier.value =
        (tiling == VK_IMAGE_TILING_LINEAR)
            ? fuchsia_sysmem::wire::kFormatModifierLinear
            : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;

    constraints->image_format_constraints[constraints->image_format_constraints_count++] =
        imageConstraints;
    return VK_SUCCESS;
}

SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    const auto& collection = *pCollection;
    if (pBufferConstraintsInfo == nullptr) {
        mesa_loge(
            "setBufferCollectionBufferConstraints: "
            "pBufferConstraintsInfo cannot be null.");
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
            /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
    constraints.usage.vulkan =
        getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishBufferSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    return {VK_SUCCESS, constraints};
}
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
uint64_t ResourceTracker::getAHardwareBufferId(AHardwareBuffer* ahw) {
    uint64_t id = 0;
    mGralloc->getId(ahw, &id);
    return id;
}
#endif

void transformExternalResourceMemoryDedicatedRequirementsForGuest(
    VkMemoryDedicatedRequirements* dedicatedReqs) {
    dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
    dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}

void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
                                                                     VkMemoryRequirements* reqs) {
#ifdef VK_USE_PLATFORM_FUCHSIA
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isSysmemBackedMemory) {
        auto width = info.createInfo.extent.width;
        auto height = info.createInfo.extent.height;
        reqs->size = width * height * 4;
    }
#else
    // Bypass "unused parameter" checks.
    (void)image;
    (void)reqs;
#endif
}

CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
                                                            VkDeviceMemory_Info& info) {
    if (info.coherentMemory && info.ptr) {
        if (info.coherentMemory->getDeviceMemory() != memory) {
            delete_goldfish_VkDeviceMemory(memory);
        }

        if (info.ptr) {
            info.coherentMemory->release(info.ptr);
            info.ptr = nullptr;
        }

        return std::move(info.coherentMemory);
    }

    return nullptr;
}

VkResult acquireSync(uint64_t syncId, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamAcquireSync acquireSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    acquireSync.hdr.opCode = GFXSTREAM_ACQUIRE_SYNC;
    acquireSync.syncId = syncId;

    exec.command = static_cast<void*>(&acquireSync);
    exec.command_size = sizeof(acquireSync);
    exec.flags = kFenceOut | kRingIdx | kShareableOut;

    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}

VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamCreateExportSyncVK exportSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);

    exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
    exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
    exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
    exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
    exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);

    exec.command = static_cast<void*>(&exportSync);
    exec.command_size = sizeof(exportSync);
    exec.flags = kFenceOut | kRingIdx;
    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}
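
// Note on the wire format above: the 64-bit host handles are split into lo/hi
// 32-bit words for the execbuffer payload; presumably the host side
// reassembles them as ((uint64_t)hi << 32) | lo.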

void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
                                             std::unordered_set<VkDescriptorSet>& allDs) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        if (!cb->userPtr) {
            continue;  // No descriptors to update.
        }

        CommandBufferPendingDescriptorSets* pendingDescriptorSets =
            (CommandBufferPendingDescriptorSets*)(cb->userPtr);

        if (pendingDescriptorSets->sets.empty()) {
            continue;  // No descriptors to update.
        }

        allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
    }
}
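
// Design note: the recursion above gathers descriptor sets from secondary
// command buffers (cb->subObjects) before processing the current level, so
// pending sets referenced only by secondaries are still included when a
// primary is submitted.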

void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
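
// A plausible reading of the sequence-number handshake above: if the command
// buffer's counter was s, the old encoder emits a host sync at s + 1 and
// flushes, and the new encoder emits one at s + 2 with the host-sync flag
// set, so host-side playback observes the old encoder's stream before the
// new encoder touches this command buffer.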

void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
                              const VkDescriptorSet* pDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);

    if (!cb->userPtr) {
        CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
        cb->userPtr = newPendingSets;
    }

    CommandBufferPendingDescriptorSets* pendingSets =
        (CommandBufferPendingDescriptorSets*)cb->userPtr;

    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        pendingSets->sets.insert(pDescriptorSets[i]);
    }
}

void decDescriptorSetLayoutRef(void* context, VkDevice device,
                               VkDescriptorSetLayout descriptorSetLayout,
                               const VkAllocationCallbacks* pAllocator) {
    if (!descriptorSetLayout) return;

    struct goldfish_VkDescriptorSetLayout* setLayout =
        as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);

    if (0 == --setLayout->layoutInfo->refcount) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
                                          true /* do lock */);
    }
}

void ResourceTracker::ensureSyncDeviceFd() {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
    if (mSyncDeviceFd >= 0) return;
    mSyncDeviceFd = goldfish_sync_open();
    if (mSyncDeviceFd >= 0) {
        mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__,
                  mSyncDeviceFd);
    } else {
        mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__);
    }
#endif
}

void ResourceTracker::unregister_VkInstance(VkInstance instance) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return;
    auto info = it->second;
    info_VkInstance.erase(instance);
}

void ResourceTracker::unregister_VkDevice(VkDevice device) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;
    auto info = it->second;
    info_VkDevice.erase(device);
}

void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
    if (!pool) return;

    clearCommandPool(pool);

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkCommandPool.erase(pool);
}

void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
    if (!sampler) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkSampler.erase(sampler);
}

void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return;
    if (cb->lastUsedEncoder) {
        cb->lastUsedEncoder->decRef();
    }
    eraseObjects(&cb->subObjects);
    forAllObjects(cb->poolObjects, [cb](void* commandPool) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
        eraseObject(&p->subObjects, (void*)cb);
    });
    eraseObjects(&cb->poolObjects);

    if (cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        delete pendingSets;
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkCommandBuffer.erase(commandBuffer);
}

void ResourceTracker::unregister_VkQueue(VkQueue queue) {
    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return;
    if (q->lastUsedEncoder) {
        q->lastUsedEncoder->decRef();
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue.erase(queue);
}

void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDeviceMemory.find(mem);
    if (it == info_VkDeviceMemory.end()) return;

    auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (memInfo.ahw) {
        mGralloc->release(memInfo.ahw);
    }
#endif

    if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(memInfo.vmoHandle);
    }

    info_VkDeviceMemory.erase(mem);
}

void ResourceTracker::unregister_VkImage(VkImage img) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkImage.find(img);
    if (it == info_VkImage.end()) return;

    info_VkImage.erase(img);
}
1099 
unregister_VkBuffer(VkBuffer buf)1100 void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
1101     std::lock_guard<std::recursive_mutex> lock(mLock);
1102 
1103     auto it = info_VkBuffer.find(buf);
1104     if (it == info_VkBuffer.end()) return;
1105 
1106     info_VkBuffer.erase(buf);
1107 }
1108 
unregister_VkSemaphore(VkSemaphore sem)1109 void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
1110     std::lock_guard<std::recursive_mutex> lock(mLock);
1111 
1112     auto it = info_VkSemaphore.find(sem);
1113     if (it == info_VkSemaphore.end()) return;
1114 
1115     auto& semInfo = it->second;
1116 
1117     if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
1118         zx_handle_close(semInfo.eventHandle);
1119     }
1120 
1121 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1122     if (semInfo.syncFd.value_or(-1) >= 0) {
1123         mSyncHelper->close(semInfo.syncFd.value());
1124     }
1125 #endif
1126 
1127     info_VkSemaphore.erase(sem);
1128 }
1129 
unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ)1130 void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
1131     std::lock_guard<std::recursive_mutex> lock(mLock);
1132     auto it = info_VkDescriptorUpdateTemplate.find(templ);
1133     if (it == info_VkDescriptorUpdateTemplate.end()) return;
1134 
1135     auto& info = it->second;
1136     if (info.templateEntryCount) delete[] info.templateEntries;
1137     if (info.imageInfoCount) {
1138         delete[] info.imageInfoIndices;
1139         delete[] info.imageInfos;
1140     }
1141     if (info.bufferInfoCount) {
1142         delete[] info.bufferInfoIndices;
1143         delete[] info.bufferInfos;
1144     }
1145     if (info.bufferViewCount) {
1146         delete[] info.bufferViewIndices;
1147         delete[] info.bufferViews;
1148     }
1149     info_VkDescriptorUpdateTemplate.erase(it);
1150 }
1151 
unregister_VkFence(VkFence fence)1152 void ResourceTracker::unregister_VkFence(VkFence fence) {
1153     std::lock_guard<std::recursive_mutex> lock(mLock);
1154     auto it = info_VkFence.find(fence);
1155     if (it == info_VkFence.end()) return;
1156 
1157     auto& fenceInfo = it->second;
1158     (void)fenceInfo;
1159 
1160 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1161     if (fenceInfo.syncFd && *fenceInfo.syncFd >= 0) {
1162         mSyncHelper->close(*fenceInfo.syncFd);
1163     }
1164 #endif
1165 
1166     info_VkFence.erase(fence);
1167 }
1168 
1169 #ifdef VK_USE_PLATFORM_FUCHSIA
unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection)1170 void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
1171     std::lock_guard<std::recursive_mutex> lock(mLock);
1172     info_VkBufferCollectionFUCHSIA.erase(collection);
1173 }
1174 #endif
1175 
unregister_VkDescriptorSet_locked(VkDescriptorSet set)1176 void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
1177     struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
1178     delete ds->reified;
1179     info_VkDescriptorSet.erase(set);
1180 }
1181 
unregister_VkDescriptorSet(VkDescriptorSet set)1182 void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
1183     if (!set) return;
1184 
1185     std::lock_guard<std::recursive_mutex> lock(mLock);
1186     unregister_VkDescriptorSet_locked(set);
1187 }
1188 
unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout)1189 void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
1190     if (!setLayout) return;
1191 
1192     std::lock_guard<std::recursive_mutex> lock(mLock);
1193     delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
1194     info_VkDescriptorSetLayout.erase(setLayout);
1195 }
1196 
freeDescriptorSetsIfHostAllocated(VkEncoder * enc,VkDevice device,uint32_t descriptorSetCount,const VkDescriptorSet * sets)1197 void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
1198                                                         uint32_t descriptorSetCount,
1199                                                         const VkDescriptorSet* sets) {
1200     for (uint32_t i = 0; i < descriptorSetCount; ++i) {
1201         struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
1202         if (ds->reified->allocationPending) {
1203             unregister_VkDescriptorSet(sets[i]);
1204             delete_goldfish_VkDescriptorSet(sets[i]);
1205         } else {
1206             enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
1207         }
1208     }
1209 }
1210 
clearDescriptorPoolAndUnregisterDescriptorSets(void * context,VkDevice device,VkDescriptorPool pool)1211 void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
1212                                                                      VkDescriptorPool pool) {
1213     std::vector<VkDescriptorSet> toClear =
1214         clearDescriptorPool(pool, mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate);
1215 
1216     for (auto set : toClear) {
1217         if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
1218             VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
1219             decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
1220         }
1221         unregister_VkDescriptorSet(set);
1222         delete_goldfish_VkDescriptorSet(set);
1223     }
1224 }
1225 
1226 void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
1227     if (!pool) return;
1228 
1229     std::lock_guard<std::recursive_mutex> lock(mLock);
1230 
1231     struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
1232     delete dp->allocInfo;
1233 
1234     info_VkDescriptorPool.erase(pool);
1235 }
1236 
1237 void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
1238                                                      VkDeviceSize* offset, uint32_t offsetCount,
1239                                                      VkDeviceSize* size, uint32_t sizeCount,
1240                                                      uint32_t* typeIndex, uint32_t typeIndexCount,
1241                                                      uint32_t* typeBits, uint32_t typeBitsCount) {
1242     (void)memory;
1243     (void)memoryCount;
1244     (void)offset;
1245     (void)offsetCount;
1246     (void)size;
1247     (void)sizeCount;
1248     (void)typeIndex;
1249     (void)typeIndexCount;
1250     (void)typeBits;
1251     (void)typeBitsCount;
1252 }
1253 
1254 void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
1255     VkExternalMemoryProperties* pProperties, uint32_t) {
1256     VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
1257 #ifdef VK_USE_PLATFORM_FUCHSIA
1258     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
1259 #endif  // VK_USE_PLATFORM_FUCHSIA
1260 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1261     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
1262                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
1263 #endif  // VK_USE_PLATFORM_ANDROID_KHR
1264     if (supportedHandleType) {
1265         pProperties->compatibleHandleTypes &= supportedHandleType;
1266         pProperties->exportFromImportedHandleTypes &= supportedHandleType;
1267     }
1268 }
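
// Illustrative sketch of the masking above on an Android build; the host
// values and the `tracker` pointer are hypothetical, and only the handle
// types collected in supportedHandleType survive the mask:
//
//     VkExternalMemoryProperties props = {
//         .externalMemoryFeatures = 0,
//         .exportFromImportedHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
//         .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
//                                  VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
//     };
//     tracker->transformImpl_VkExternalMemoryProperties_fromhost(&props, 1);
//     // props.compatibleHandleTypes == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT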
1269 
1270 void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
1271                                       const char* const* ppEnabledExtensionNames,
1272                                       uint32_t apiVersion) {
1273     std::lock_guard<std::recursive_mutex> lock(mLock);
1274     auto& info = info_VkInstance[instance];
1275     info.highestApiVersion = apiVersion;
1276 
1277     if (!ppEnabledExtensionNames) return;
1278 
1279     for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
1280         info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
1281     }
1282 }
1283 
1284 void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
1285                                     VkPhysicalDeviceProperties props,
1286                                     VkPhysicalDeviceMemoryProperties memProps,
1287                                     uint32_t enabledExtensionCount,
1288                                     const char* const* ppEnabledExtensionNames, const void* pNext) {
1289     std::lock_guard<std::recursive_mutex> lock(mLock);
1290     auto& info = info_VkDevice[device];
1291     info.physdev = physdev;
1292     info.props = props;
1293     info.memProps = memProps;
1294     info.apiVersion = props.apiVersion;
1295 
1296     const VkBaseInStructure* extensionCreateInfo =
1297         reinterpret_cast<const VkBaseInStructure*>(pNext);
1298     while (extensionCreateInfo) {
1299         if (extensionCreateInfo->sType ==
1300             VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
1301             auto deviceMemoryReportCreateInfo =
1302                 reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
1303                     extensionCreateInfo);
1304             if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
1305                 info.deviceMemoryReportCallbacks.emplace_back(
1306                     deviceMemoryReportCreateInfo->pfnUserCallback,
1307                     deviceMemoryReportCreateInfo->pUserData);
1308             }
1309         }
1310         extensionCreateInfo = extensionCreateInfo->pNext;
1311     }
1312 
1313     if (!ppEnabledExtensionNames) return;
1314 
1315     for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
1316         info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
1317     }
1318 }
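
// For reference, this is how a guest application would arrange for the pNext
// walk above to find a memory-report callback at vkCreateDevice time (a
// hedged sketch; appMemoryReportCallback is a hypothetical application
// function):
//
//     VkDeviceDeviceMemoryReportCreateInfoEXT memoryReportInfo = {
//         .sType = VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT,
//         .pNext = nullptr,
//         .flags = 0,
//         .pfnUserCallback = appMemoryReportCallback,
//         .pUserData = nullptr,
//     };
//     VkDeviceCreateInfo deviceCreateInfo = {
//         .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
//         .pNext = &memoryReportInfo,
//         // queue and extension setup elided
//     };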
1319 
1320 void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
1321                                           VkDeviceSize allocationSize, uint8_t* ptr,
1322                                           uint32_t memoryTypeIndex, void* ahw, bool imported,
1323                                           zx_handle_t vmoHandle, VirtGpuResourcePtr blobPtr) {
1324     std::lock_guard<std::recursive_mutex> lock(mLock);
1325     auto& info = info_VkDeviceMemory[memory];
1326 
1327     info.device = device;
1328     info.allocationSize = allocationSize;
1329     info.ptr = ptr;
1330     info.memoryTypeIndex = memoryTypeIndex;
1331 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1332     info.ahw = (AHardwareBuffer*)ahw;
1333 #endif
1334     info.imported = imported;
1335     info.vmoHandle = vmoHandle;
1336     info.blobPtr = blobPtr;
1337 }
1338 
1339 void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
1340                                    const VkImageCreateInfo* pCreateInfo) {
1341     std::lock_guard<std::recursive_mutex> lock(mLock);
1342     auto& info = info_VkImage[image];
1343 
1344     info.device = device;
1345     info.createInfo = *pCreateInfo;
1346 }
1347 
1348 uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
1349     std::lock_guard<std::recursive_mutex> lock(mLock);
1350     const auto it = info_VkDeviceMemory.find(memory);
1351     if (it == info_VkDeviceMemory.end()) return nullptr;
1352 
1353     const auto& info = it->second;
1354     return info.ptr;
1355 }
1356 
1357 VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
1358     std::lock_guard<std::recursive_mutex> lock(mLock);
1359     const auto it = info_VkDeviceMemory.find(memory);
1360     if (it == info_VkDeviceMemory.end()) return 0;
1361 
1362     const auto& info = it->second;
1363     return info.allocationSize;
1364 }
1365 
1366 bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) {
1367     std::lock_guard<std::recursive_mutex> lock(mLock);
1368     const auto it = info_VkDeviceMemory.find(range.memory);
1369     if (it == info_VkDeviceMemory.end()) return false;
1370     const auto& info = it->second;
1371 
1372     if (!info.ptr) return false;
1373 
1374     VkDeviceSize offset = range.offset;
1375     VkDeviceSize size = range.size;
1376 
1377     if (size == VK_WHOLE_SIZE) {
1378         return offset <= info.allocationSize;
1379     }
1380 
1381     return offset + size <= info.allocationSize;
1382 }
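
// Worked example of the range check above, assuming an allocation of 4096
// bytes:
//     { offset = 0,    size = VK_WHOLE_SIZE } -> valid (whole allocation)
//     { offset = 4096, size = VK_WHOLE_SIZE } -> valid (empty tail range)
//     { offset = 1024, size = 4096 }          -> invalid (1024 + 4096 > 4096)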
1383 
1384 void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
1385     VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
1386     mCaps = instance->getCaps();
1387 
1388     // Delete once goldfish Linux drivers are gone
1389     if (mCaps.vulkanCapset.protocolVersion == 0) {
1390         mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
1391     } else {
1392         // Don't query the render control encoder for features, since for virtio-gpu the
1393         // capabilities provide versioning. Set features to be unconditionally true, since
1394         // using virtio-gpu encompasses all prior goldfish features.  mFeatureInfo should be
1395         // deprecated in favor of caps.
1396         mFeatureInfo.hasVulkanNullOptionalStrings = true;
1397         mFeatureInfo.hasVulkanIgnoredHandles = true;
1398         mFeatureInfo.hasVulkanShaderFloat16Int8 = true;
1399         mFeatureInfo.hasVulkanQueueSubmitWithCommands = true;
1400         mFeatureInfo.hasDeferredVulkanCommands = true;
1401         mFeatureInfo.hasVulkanAsyncQueueSubmit = true;
1402         mFeatureInfo.hasVulkanCreateResourcesWithRequirements = true;
1403         mFeatureInfo.hasVirtioGpuNext = true;
1404         mFeatureInfo.hasVirtioGpuNativeSync = true;
1405         mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate =
            mCaps.vulkanCapset.vulkanBatchedDescriptorSetUpdate;
1406         mFeatureInfo.hasVulkanAsyncQsri = true;
1407 
1408         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
1409         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
1410         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
1411         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
1412     }
1413 
1414     noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
1415 }
1416 
1417 void ResourceTracker::setupFeatures(const struct GfxStreamVkFeatureInfo* features) {
1418     if (mFeatureInfo.setupComplete) {
1419         return;
1420     }
1421 
1422     mFeatureInfo = *features;
1423 #if defined(__ANDROID__)
1424     if (mFeatureInfo.hasDirectMem) {
1425         mGoldfishAddressSpaceBlockProvider.reset(
1426             new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
1427     }
1428 #endif  // defined(__ANDROID__)
1429 
1430 #ifdef VK_USE_PLATFORM_FUCHSIA
1431     if (mFeatureInfo.hasVulkan) {
1432         fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
1433             GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
1434         if (!channel) {
1435             mesa_loge("failed to open control device");
1436             abort();
1437         }
1438         mControlDevice =
1439             fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));
1440 
1441         fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
1442             zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
1443         if (!sysmem_channel) {
1444             mesa_loge("failed to open sysmem connection");
1445         }
1446         mSysmemAllocator =
1447             fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
1448         char name[ZX_MAX_NAME_LEN] = {};
1449         zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
1450         std::string client_name(name);
1451         client_name += "-goldfish";
1452         zx_info_handle_basic_t info;
1453         zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
1454                            nullptr);
1455         mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
1456                                              info.koid);
1457     }
1458 #endif
1459 
1460     if (mFeatureInfo.hasVulkanNullOptionalStrings) {
1461         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
1462     }
1463     if (mFeatureInfo.hasVulkanIgnoredHandles) {
1464         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
1465     }
1466     if (mFeatureInfo.hasVulkanShaderFloat16Int8) {
1467         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
1468     }
1469     if (mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
1470         ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
1471     }
1472 
1473     mFeatureInfo.setupComplete = true;
1474 }
1475 
1476 void ResourceTracker::setupPlatformHelpers() {
1477 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
1478     VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
1479     auto deviceHandle = instance->getDeviceHandle();
1480     if (mGralloc == nullptr) {
1481         mGralloc.reset(gfxstream::createPlatformGralloc(deviceHandle));
1482     }
1483 #endif
1484 
1485     if (mSyncHelper == nullptr) {
1486         mSyncHelper.reset(gfxstream::createPlatformSyncHelper());
1487     }
1488 }
1489 
1490 void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
1491     ResourceTracker::threadingCallbacks = callbacks;
1492 }
1493 
1494 bool ResourceTracker::usingDirectMapping() const { return true; }
1495 
1496 uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }
1497 
1498 bool ResourceTracker::supportsDeferredCommands() const {
1499     return mFeatureInfo.hasDeferredVulkanCommands;
1500 }
1501 
1502 bool ResourceTracker::supportsAsyncQueueSubmit() const {
1503     return mFeatureInfo.hasVulkanAsyncQueueSubmit;
1504 }
1505 
1506 bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
1507     return mFeatureInfo.hasVulkanCreateResourcesWithRequirements;
1508 }
1509 
1510 int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
1511     int i = 0;
1512     for (const auto& prop : mHostInstanceExtensions) {
1513         if (extName == std::string(prop.extensionName)) {
1514             return i;
1515         }
1516         ++i;
1517     }
1518     return -1;
1519 }
1520 
1521 int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
1522     int i = 0;
1523     for (const auto& prop : mHostDeviceExtensions) {
1524         if (extName == std::string(prop.extensionName)) {
1525             return i;
1526         }
1527         ++i;
1528     }
1529     return -1;
1530 }
1531 
1532 void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
1533                                                    VkDeviceSize* offset, uint32_t offsetCount,
1534                                                    VkDeviceSize* size, uint32_t sizeCount,
1535                                                    uint32_t* typeIndex, uint32_t typeIndexCount,
1536                                                    uint32_t* typeBits, uint32_t typeBitsCount) {
1537     (void)memoryCount;
1538     (void)offsetCount;
1539     (void)sizeCount;
1540     (void)typeIndex;
1541     (void)typeIndexCount;
1542     (void)typeBits;
1543     (void)typeBitsCount;
1544 
1545     if (memory) {
1546         std::lock_guard<std::recursive_mutex> lock(mLock);
1547 
1548         for (uint32_t i = 0; i < memoryCount; ++i) {
1549             VkDeviceMemory mem = memory[i];
1550 
1551             auto it = info_VkDeviceMemory.find(mem);
1552             if (it == info_VkDeviceMemory.end()) return;
1553 
1554             const auto& info = it->second;
1555 
1556             if (!info.coherentMemory) continue;
1557 
1558             memory[i] = info.coherentMemory->getDeviceMemory();
1559 
1560             if (offset) {
1561                 offset[i] = info.coherentMemoryOffset + offset[i];
1562             }
1563 
1564             if (size && size[i] == VK_WHOLE_SIZE) {
1565                 size[i] = info.allocationSize;
1566             }
1567 
1568             // TODO
1569             (void)memory;
1570             (void)offset;
1571             (void)size;
1572         }
1573     }
1574 }
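
// Illustrative trace of the rewrite above (all values hypothetical): for a
// guest VkDeviceMemory sub-allocated at coherentMemoryOffset 0x1000 inside a
// host coherent block, the encoder-facing arrays become:
//
//     memory[i]: guest handle  -> info.coherentMemory->getDeviceMemory()
//     offset[i]: 0x40          -> 0x1000 + 0x40 = 0x1040
//     size[i]:   VK_WHOLE_SIZE -> info.allocationSize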
1575 
1576 uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
1577     // Create test image to get the memory requirements
1578     VkEncoder* enc = (VkEncoder*)context;
1579     VkImageCreateInfo createInfo = {
1580         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1581         .imageType = VK_IMAGE_TYPE_2D,
1582         .format = VK_FORMAT_R8G8B8A8_UNORM,
1583         .extent = {64, 64, 1},
1584         .mipLevels = 1,
1585         .arrayLayers = 1,
1586         .samples = VK_SAMPLE_COUNT_1_BIT,
1587         .tiling = VK_IMAGE_TILING_OPTIMAL,
1588         .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
1589                  VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
1590                  VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
1591         .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
1592     };
1593     VkImage image = VK_NULL_HANDLE;
1594     VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);
1595 
1596     if (res != VK_SUCCESS) {
1597         return 0;
1598     }
1599 
1600     VkMemoryRequirements memReqs;
1601     enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
1602     enc->vkDestroyImage(device, image, nullptr, true /* do lock */);
1603 
1604     const VkPhysicalDeviceMemoryProperties& memProps =
1605         getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
1606 
1607     // Currently, the host looks for the last index that has memory
1608     // property type VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
1609     VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1610     for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
1611         if ((memReqs.memoryTypeBits & (1u << i)) &&
1612             (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
1613             return i;
1614         }
1615     }
1616 
1617     return 0;
1618 }
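
// Worked example for the scan above (numbers are hypothetical): with
// memReqs.memoryTypeBits == 0b0110 and only memory type 2 carrying
// VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, the downward loop returns index 2,
// the highest device-local index, matching the host's "last index"
// convention described in the comment.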
1619 
1620 VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
1621     void* context, VkResult, const char*, uint32_t* pPropertyCount,
1622     VkExtensionProperties* pProperties) {
1623     std::vector<const char*> allowedExtensionNames = {
1624         "VK_KHR_get_physical_device_properties2",
1625         "VK_KHR_sampler_ycbcr_conversion",
1626 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1627         "VK_KHR_external_semaphore_capabilities",
1628         "VK_KHR_external_memory_capabilities",
1629         "VK_KHR_external_fence_capabilities",
1630         "VK_EXT_debug_utils",
1631 #endif
1632     };
1633 
1634     VkEncoder* enc = (VkEncoder*)context;
1635 
1636     // Only advertise a select set of extensions.
1637     if (mHostInstanceExtensions.empty()) {
1638         uint32_t hostPropCount = 0;
1639         enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
1640                                                     true /* do lock */);
1641         mHostInstanceExtensions.resize(hostPropCount);
1642 
1643         VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
1644             nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);
1645 
1646         if (hostRes != VK_SUCCESS) {
1647             return hostRes;
1648         }
1649     }
1650 
1651     std::vector<VkExtensionProperties> filteredExts;
1652 
1653     for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
1654         auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
1655         if (extIndex != -1) {
1656             filteredExts.push_back(mHostInstanceExtensions[extIndex]);
1657         }
1658     }
1659 
1660     VkExtensionProperties anbExtProps[] = {
1661 #ifdef VK_USE_PLATFORM_FUCHSIA
1662         {"VK_KHR_external_memory_capabilities", 1},
1663         {"VK_KHR_external_semaphore_capabilities", 1},
1664 #endif
1665     };
1666 
1667     for (auto& anbExtProp : anbExtProps) {
1668         filteredExts.push_back(anbExtProp);
1669     }
1670 
1671     // Spec:
1672     //
1673     // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
1674     //
1675     // If pProperties is NULL, then the number of extensions properties
1676     // available is returned in pPropertyCount. Otherwise, pPropertyCount
1677     // must point to a variable set by the user to the number of elements
1678     // in the pProperties array, and on return the variable is overwritten
1679     // with the number of structures actually written to pProperties. If
1680     // pPropertyCount is less than the number of extension properties
1681     // available, at most pPropertyCount structures will be written. If
1682     // pPropertyCount is smaller than the number of extensions available,
1683     // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
1684     // that not all the available properties were returned.
1685     //
1686     // pPropertyCount must be a valid pointer to a uint32_t value
1687     if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
1688 
1689     if (!pProperties) {
1690         *pPropertyCount = (uint32_t)filteredExts.size();
1691         return VK_SUCCESS;
1692     } else {
1693         auto actualExtensionCount = (uint32_t)filteredExts.size();
1694         if (*pPropertyCount > actualExtensionCount) {
1695             *pPropertyCount = actualExtensionCount;
1696         }
1697 
1698         for (uint32_t i = 0; i < *pPropertyCount; ++i) {
1699             pProperties[i] = filteredExts[i];
1700         }
1701 
1702         if (actualExtensionCount > *pPropertyCount) {
1703             return VK_INCOMPLETE;
1704         }
1705 
1706         return VK_SUCCESS;
1707     }
1708 }
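
// The count handling above serves the spec's standard two-call idiom on the
// guest application side (an illustrative sketch, not code from this file):
//
//     uint32_t count = 0;
//     vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//     std::vector<VkExtensionProperties> exts(count);
//     VkResult res =
//         vkEnumerateInstanceExtensionProperties(nullptr, &count, exts.data());
//     // res == VK_INCOMPLETE only if more extensions became available
//     // between the two calls; otherwise count entries are filled in.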
1709 
1710 VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
1711     void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
1712     VkExtensionProperties* pProperties) {
1713     std::vector<const char*> allowedExtensionNames = {
1714         "VK_KHR_vulkan_memory_model",
1715         "VK_KHR_buffer_device_address",
1716         "VK_KHR_maintenance1",
1717         "VK_KHR_maintenance2",
1718         "VK_KHR_maintenance3",
1719         "VK_KHR_bind_memory2",
1720         "VK_KHR_dedicated_allocation",
1721         "VK_KHR_get_memory_requirements2",
1722         "VK_KHR_sampler_ycbcr_conversion",
1723         "VK_KHR_shader_float16_int8",
1724     // Timeline semaphores buggy in newer NVIDIA drivers
1725     // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
1726 #ifndef VK_USE_PLATFORM_ANDROID_KHR
1727         "VK_KHR_timeline_semaphore",
1728 #endif
1729         "VK_AMD_gpu_shader_half_float",
1730         "VK_NV_shader_subgroup_partitioned",
1731         "VK_KHR_shader_subgroup_extended_types",
1732         "VK_EXT_subgroup_size_control",
1733         "VK_EXT_provoking_vertex",
1734         "VK_KHR_line_rasterization",
1735         "VK_EXT_line_rasterization",
1736         "VK_KHR_shader_terminate_invocation",
1737         "VK_EXT_transform_feedback",
1738         "VK_EXT_primitive_topology_list_restart",
1739         "VK_EXT_index_type_uint8",
1740         "VK_EXT_load_store_op_none",
1741         "VK_EXT_swapchain_colorspace",
1742         "VK_EXT_image_robustness",
1743         "VK_EXT_custom_border_color",
1744         "VK_EXT_shader_stencil_export",
1745         "VK_KHR_image_format_list",
1746         "VK_KHR_incremental_present",
1747         "VK_KHR_pipeline_executable_properties",
1748         "VK_EXT_queue_family_foreign",
1749         "VK_EXT_scalar_block_layout",
1750         "VK_KHR_descriptor_update_template",
1751         "VK_KHR_storage_buffer_storage_class",
1752         "VK_EXT_depth_clip_enable",
1753         "VK_KHR_create_renderpass2",
1754         "VK_EXT_vertex_attribute_divisor",
1755         "VK_EXT_host_query_reset",
1756 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1757         "VK_KHR_external_semaphore",
1758         "VK_KHR_external_semaphore_fd",
1759         // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
1760         "VK_KHR_external_memory",
1761         "VK_KHR_external_fence",
1762         "VK_KHR_external_fence_fd",
1763         "VK_EXT_device_memory_report",
1764 #endif
1765 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
1766         "VK_KHR_imageless_framebuffer",
1767 #endif
1768         // Vulkan 1.3
1769         "VK_KHR_synchronization2",
1770         "VK_EXT_private_data",
1771         "VK_EXT_color_write_enable",
1772     };
1773 
1774     VkEncoder* enc = (VkEncoder*)context;
1775 
1776     if (mHostDeviceExtensions.empty()) {
1777         uint32_t hostPropCount = 0;
1778         enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
1779                                                   true /* do lock */);
1780         mHostDeviceExtensions.resize(hostPropCount);
1781 
1782         VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
1783             physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
1784 
1785         if (hostRes != VK_SUCCESS) {
1786             return hostRes;
1787         }
1788     }
1789 
1790     std::vector<VkExtensionProperties> filteredExts;
1791 
1792     for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
1793         auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
1794         if (extIndex != -1) {
1795             filteredExts.push_back(mHostDeviceExtensions[extIndex]);
1796         }
1797     }
1798 
1799     VkExtensionProperties anbExtProps[] = {
1800 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1801         {"VK_ANDROID_native_buffer", 7},
1802 #endif
1803 #ifdef VK_USE_PLATFORM_FUCHSIA
1804         {"VK_KHR_external_memory", 1},
1805         {"VK_KHR_external_semaphore", 1},
1806         {"VK_FUCHSIA_external_semaphore", 1},
1807 #endif
1808     };
1809 
1810     for (auto& anbExtProp : anbExtProps) {
1811         filteredExts.push_back(anbExtProp);
1812     }
1813 
1814     /*
1815      * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report
1816      * extension is present.  It is filtered out when sent host-side, since for a
1817      * virtual GPU this is quite difficult to implement.
1818      *
1819      * Mesa runtime checks physical device features.  So if the test tries to enable
1820      * device level extension without it definitely existing, the test will fail.
1821      *
1822      * The test can also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT,
1823      * but that's more involved.  Work around this by always advertising the extension.
1824      * Tracking bug: b/338270042
1825      */
1826     filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1});
1827 
1828 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1829     bool hostSupportsExternalFenceFd =
1830         getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
1831     if (!hostSupportsExternalFenceFd) {
1832         filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
1833     }
1834 #endif
1835 
1836 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1837     bool hostHasPosixExternalSemaphore =
1838         getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
1839     if (!hostHasPosixExternalSemaphore) {
1840         // Always advertise posix external semaphore capabilities on Android/Linux.
1841         // SYNC_FD handles will always work, regardless of host support. Support
1842         // for non-sync, opaque FDs depends on host driver support, but will
1843         // be handled accordingly by the host.
1844         filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
1845     }
1846 #endif
1847 
1848     bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
1849     bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
1850     bool metalExtMemAvailable = getHostDeviceExtensionIndex("VK_EXT_external_memory_metal") != -1 ||
1851                                 getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
1852     bool qnxExtMemAvailable =
1853         getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;
1854 
1855     bool hostHasExternalMemorySupport =
1856         win32ExtMemAvailable || posixExtMemAvailable || metalExtMemAvailable || qnxExtMemAvailable;
1857 
1858     if (hostHasExternalMemorySupport) {
1859 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1860         filteredExts.push_back(
1861             VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
1862         filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
1863 #endif
1864 #ifdef VK_USE_PLATFORM_FUCHSIA
1865         filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
1866         filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
1867 #endif
1868 #if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
1869         filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
1870         filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
1871         // In case the host doesn't support format modifiers, they are emulated
1872         // on the guest side.
1873         filteredExts.push_back(VkExtensionProperties{"VK_EXT_image_drm_format_modifier", 1});
1874 #endif
1875     }
1876 
1877     // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can lead
1878     // to errors if this function returns VK_SUCCESS with N elements (including a duplicate)
1879     // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements
1880     // (without the duplicate).
1881     std::sort(filteredExts.begin(),
1882               filteredExts.end(),
1883               [](const VkExtensionProperties& a,
1884                  const VkExtensionProperties& b) {
1885                   return strcmp(a.extensionName, b.extensionName) < 0;
1886               });
1887     filteredExts.erase(std::unique(filteredExts.begin(),
1888                                    filteredExts.end(),
1889                                    [](const VkExtensionProperties& a,
1890                                       const VkExtensionProperties& b) {
1891                                        return strcmp(a.extensionName, b.extensionName) == 0;
1892                                    }),
1893                        filteredExts.end());
1894 
1895     // Spec:
1896     //
1897     // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
1898     //
1899     // pPropertyCount is a pointer to an integer related to the number of
1900     // extension properties available or queried, and is treated in the
1901     // same fashion as the
1902     // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
1903     //
1904     // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
1905     //
1906     // If pProperties is NULL, then the number of extensions properties
1907     // available is returned in pPropertyCount. Otherwise, pPropertyCount
1908     // must point to a variable set by the user to the number of elements
1909     // in the pProperties array, and on return the variable is overwritten
1910     // with the number of structures actually written to pProperties. If
1911     // pPropertyCount is less than the number of extension properties
1912     // available, at most pPropertyCount structures will be written. If
1913     // pPropertyCount is smaller than the number of extensions available,
1914     // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
1915     // that not all the available properties were returned.
1916     //
1917     // pPropertyCount must be a valid pointer to a uint32_t value
1918 
1919     if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
1920 
1921     if (!pProperties) {
1922         *pPropertyCount = (uint32_t)filteredExts.size();
1923         return VK_SUCCESS;
1924     } else {
1925         auto actualExtensionCount = (uint32_t)filteredExts.size();
1926         if (*pPropertyCount > actualExtensionCount) {
1927             *pPropertyCount = actualExtensionCount;
1928         }
1929 
1930         for (uint32_t i = 0; i < *pPropertyCount; ++i) {
1931             pProperties[i] = filteredExts[i];
1932         }
1933 
1934         if (actualExtensionCount > *pPropertyCount) {
1935             return VK_INCOMPLETE;
1936         }
1937 
1938         return VK_SUCCESS;
1939     }
1940 }
1941 
1942 VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
1943                                                         VkInstance instance,
1944                                                         uint32_t* pPhysicalDeviceCount,
1945                                                         VkPhysicalDevice* pPhysicalDevices) {
1946     VkEncoder* enc = (VkEncoder*)context;
1947 
1948     if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
1949 
1950     if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;
1951 
1952     std::unique_lock<std::recursive_mutex> lock(mLock);
1953 
1954     // When this function is called, we actually need to do two things:
1955     // - Get full information about physical devices from the host,
1956     // even if the guest did not ask for it
1957     // - Serve the guest query according to the spec:
1958     //
1959     // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
1960 
1961     auto it = info_VkInstance.find(instance);
1962 
1963     if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
1964 
1965     auto& info = it->second;
1966 
1967     // Get the full host information here if it doesn't exist already.
1968     if (info.physicalDevices.empty()) {
1969         uint32_t hostPhysicalDeviceCount = 0;
1970 
1971         lock.unlock();
1972         VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
1973                                                             nullptr, false /* no lock */);
1974         lock.lock();
1975 
1976         if (countRes != VK_SUCCESS) {
1977             mesa_loge(
1978                 "%s: failed: could not count host physical devices. "
1979                 "Error %d\n",
1980                 __func__, countRes);
1981             return countRes;
1982         }
1983 
1984         info.physicalDevices.resize(hostPhysicalDeviceCount);
1985 
1986         lock.unlock();
1987         VkResult enumRes = enc->vkEnumeratePhysicalDevices(
1988             instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
1989         lock.lock();
1990 
1991         if (enumRes != VK_SUCCESS) {
1992             mesa_loge(
1993                 "%s: failed: could not retrieve host physical devices. "
1994                 "Error %d\n",
1995                 __func__, enumRes);
1996             return enumRes;
1997         }
1998     }
1999 
2000     // Serve the guest query according to the spec.
2001     //
2002     // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
2003     //
2004     // If pPhysicalDevices is NULL, then the number of physical devices
2005     // available is returned in pPhysicalDeviceCount. Otherwise,
2006     // pPhysicalDeviceCount must point to a variable set by the user to the
2007     // number of elements in the pPhysicalDevices array, and on return the
2008     // variable is overwritten with the number of handles actually written
2009     // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
2010     // of physical devices available, at most pPhysicalDeviceCount
2011     // structures will be written.  If pPhysicalDeviceCount is smaller than
2012     // the number of physical devices available, VK_INCOMPLETE will be
2013     // returned instead of VK_SUCCESS, to indicate that not all the
2014     // available physical devices were returned.
2015 
2016     if (!pPhysicalDevices) {
2017         *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
2018         return VK_SUCCESS;
2019     } else {
2020         uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
2021         uint32_t toWrite =
2022             actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;
2023 
2024         for (uint32_t i = 0; i < toWrite; ++i) {
2025             pPhysicalDevices[i] = info.physicalDevices[i];
2026         }
2027 
2028         *pPhysicalDeviceCount = toWrite;
2029 
2030         if (actualDeviceCount > *pPhysicalDeviceCount) {
2031             return VK_INCOMPLETE;
2032         }
2033 
2034         return VK_SUCCESS;
2035     }
2036 }
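
// Truncation example for the branch above (counts are hypothetical): if the
// host exposes two physical devices and the caller passes
// *pPhysicalDeviceCount == 1, exactly one handle is written,
// *pPhysicalDeviceCount is set to 1, and VK_INCOMPLETE is returned.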
2037 
2038 void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
2039                                                        VkPhysicalDeviceProperties* pProperties) {
2040 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
2041     if (pProperties) {
2042         if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
2043             /* For Linux guests: even if the host driver reports DEVICE_TYPE_CPU,
2044              * override this to VIRTUAL_GPU; otherwise Linux DRM interfaces
2045              * will take unexpected code paths to deal with a "software" driver.
2046              */
2047             pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
2048         }
2049     }
2050 #endif
2051 }
2052 
2053 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
2054                                                       VkPhysicalDeviceFeatures2* pFeatures) {
2055     if (pFeatures) {
2056         VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
2057             vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
2058         if (memoryReportFeaturesEXT) {
2059             memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
2060         }
2061     }
2062 }
2063 
2064 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
2065                                                          VkPhysicalDevice physicalDevice,
2066                                                          VkPhysicalDeviceFeatures2* pFeatures) {
2067     on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
2068 }
2069 
2070 void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
2071                                                         VkPhysicalDevice physicalDevice,
2072                                                         VkPhysicalDeviceProperties2* pProperties) {
2073     if (pProperties) {
2074         VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
2075             vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
2076         if (memoryReportFeaturesEXT) {
2077             memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
2078         }
2079         on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
2080     }
2081 }
2082 
2083 void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
2084     void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
2085     on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
2086 }
2087 
2088 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
2089     void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
2090     // gfxstream decides which physical device to expose to the guest on startup.
2091     // Otherwise, we would need a physical-device-to-properties mapping.
2092     *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
2093 }
2094 
2095 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
2096     void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
2097     on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
2098 }
2099 
2100 void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
2101                                           VkQueue* pQueue) {
2102     std::lock_guard<std::recursive_mutex> lock(mLock);
2103     info_VkQueue[*pQueue].device = device;
2104 }
2105 
2106 void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
2107                                            VkQueue* pQueue) {
2108     std::lock_guard<std::recursive_mutex> lock(mLock);
2109     info_VkQueue[*pQueue].device = device;
2110 }
2111 
2112 VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
2113                                               const VkInstanceCreateInfo* createInfo,
2114                                               const VkAllocationCallbacks*, VkInstance* pInstance) {
2115     if (input_result != VK_SUCCESS) return input_result;
2116 
2117     VkEncoder* enc = (VkEncoder*)context;
2118 
2119     uint32_t apiVersion = kDefaultApiVersion;
2120     VkResult enumInstanceVersionRes =
2121         enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
    // Initializing apiVersion to kDefaultApiVersion keeps it well-defined even
    // if the version query fails; the result code itself is otherwise unused.
    (void)enumInstanceVersionRes;
2122 
2123     setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
2124                     createInfo->ppEnabledExtensionNames, apiVersion);
2125 
2126     return input_result;
2127 }
2128 
2129 VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
2130                                             VkPhysicalDevice physicalDevice,
2131                                             const VkDeviceCreateInfo* pCreateInfo,
2132                                             const VkAllocationCallbacks*, VkDevice* pDevice) {
2133     if (input_result != VK_SUCCESS) return input_result;
2134 
2135     VkEncoder* enc = (VkEncoder*)context;
2136 
2137     VkPhysicalDeviceProperties props;
2138     VkPhysicalDeviceMemoryProperties memProps;
2139     enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
2140     enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
2141 
2142     setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
2143                   pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);
2144 
2145     return input_result;
2146 }
2147 
2148 void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
2149                                              const VkAllocationCallbacks*) {
2150     (void)context;
2151     std::lock_guard<std::recursive_mutex> lock(mLock);
2152 
2153     auto it = info_VkDevice.find(device);
2154     if (it == info_VkDevice.end()) return;
2155 
2156     for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
2157         auto& memInfo = itr->second;
2158         if (memInfo.device == device) {
2159             itr = info_VkDeviceMemory.erase(itr);
2160         } else {
2161             itr++;
2162         }
2163     }
2164 }
2165 
2166 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
2167 void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
2168     *memoryTypeBits = 1u << memoryIndex;
2169 }
2170 #endif
2171 
2172 #ifdef VK_USE_PLATFORM_ANDROID_KHR
2173 
2174 VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
2175     void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
2176     VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
2177     // Delete once goldfish Linux drivers are gone
2178     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
2179         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
2180     }
2181 
2182     updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
2183 
2184     return getAndroidHardwareBufferPropertiesANDROID(mGralloc.get(), buffer, pProperties);
2185 }
2186 
2187 VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
2188     void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
2189     struct AHardwareBuffer** pBuffer) {
2190     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2191     if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
2192 
2193     std::lock_guard<std::recursive_mutex> lock(mLock);
2194 
2195     auto deviceIt = info_VkDevice.find(device);
2196 
2197     if (deviceIt == info_VkDevice.end()) {
2198         return VK_ERROR_INITIALIZATION_FAILED;
2199     }
2200 
2201     auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
2202 
2203     if (memoryIt == info_VkDeviceMemory.end()) {
2204         return VK_ERROR_INITIALIZATION_FAILED;
2205     }
2206 
2207     auto& info = memoryIt->second;
2208     VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(mGralloc.get(), &info.ahw);
2209 
2210     if (queryRes != VK_SUCCESS) return queryRes;
2211 
2212     *pBuffer = info.ahw;
2213 
2214     return queryRes;
2215 }
2216 #endif
2217 
2218 #ifdef VK_USE_PLATFORM_FUCHSIA
2219 VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
2220     void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
2221     uint32_t* pHandle) {
2222     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2223     if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
2224 
2225     std::lock_guard<std::recursive_mutex> lock(mLock);
2226 
2227     auto deviceIt = info_VkDevice.find(device);
2228 
2229     if (deviceIt == info_VkDevice.end()) {
2230         return VK_ERROR_INITIALIZATION_FAILED;
2231     }
2232 
2233     auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
2234 
2235     if (memoryIt == info_VkDeviceMemory.end()) {
2236         return VK_ERROR_INITIALIZATION_FAILED;
2237     }
2238 
2239     auto& info = memoryIt->second;
2240 
2241     if (info.vmoHandle == ZX_HANDLE_INVALID) {
2242         mesa_loge("%s: memory cannot be exported", __func__);
2243         return VK_ERROR_INITIALIZATION_FAILED;
2244     }
2245 
2246     *pHandle = ZX_HANDLE_INVALID;
2247     zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2248     return VK_SUCCESS;
2249 }
2250 
2251 VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
2252     void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
2253     uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
2254     using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
2255     using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
2256 
2257     if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
2258         return VK_ERROR_INITIALIZATION_FAILED;
2259     }
2260 
2261     zx_info_handle_basic_t handleInfo;
2262     zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
2263                                                            sizeof(handleInfo), nullptr, nullptr);
2264     if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
2265         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2266     }
2267 
2268     std::lock_guard<std::recursive_mutex> lock(mLock);
2269 
2270     auto deviceIt = info_VkDevice.find(device);
2271 
2272     if (deviceIt == info_VkDevice.end()) {
2273         return VK_ERROR_INITIALIZATION_FAILED;
2274     }
2275 
2276     auto& info = deviceIt->second;
2277 
2278     zx::vmo vmo_dup;
2279     status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
2280     if (status != ZX_OK) {
2281         mesa_loge("zx_handle_duplicate() error: %d", status);
2282         return VK_ERROR_INITIALIZATION_FAILED;
2283     }
2284 
2285     uint32_t memoryProperty = 0u;
2286 
2287     auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
2288     if (!result.ok()) {
2289         mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
2290         return VK_ERROR_INITIALIZATION_FAILED;
2291     }
2292     if (result.value().is_ok()) {
2293         memoryProperty = result.value().value()->info.memory_property();
2294     } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
2295         // If a VMO is allocated while ColorBuffer/Buffer is not created,
2296         // it must be a device-local buffer, since for host-visible buffers,
2297         // ColorBuffer/Buffer is created at sysmem allocation time.
2298         memoryProperty = kMemoryPropertyDeviceLocal;
2299     } else {
2300         // Importing read-only host memory into the Vulkan driver should not
2301         // work, but it is not an error to try to do so. Returning a
2302         // VkMemoryZirconHandlePropertiesFUCHSIA with no available
2303         // memoryType bits should be enough for clients. See fxbug.dev/42098398
2304         // for other issues with this flow.
2305         mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
2306         pProperties->memoryTypeBits = 0;
2307         return VK_SUCCESS;
2308     }
2309 
2310     pProperties->memoryTypeBits = 0;
2311     for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
2312         if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
2313              (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2314             ((memoryProperty & kMemoryPropertyHostVisible) &&
2315              (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2316             pProperties->memoryTypeBits |= 1ull << i;
2317         }
2318     }
2319     return VK_SUCCESS;
2320 }
2321 
2322 zx_koid_t getEventKoid(zx_handle_t eventHandle) {
2323     if (eventHandle == ZX_HANDLE_INVALID) {
2324         return ZX_KOID_INVALID;
2325     }
2326 
2327     zx_info_handle_basic_t info;
2328     zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
2329                                             nullptr, nullptr);
2330     if (status != ZX_OK) {
2331         mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status);
2332         return ZX_KOID_INVALID;
2333     }
2334     return info.koid;
2335 }
2336 
2337 VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
2338     void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
2339     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2340     if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2341 
2342     std::lock_guard<std::recursive_mutex> lock(mLock);
2343 
2344     auto deviceIt = info_VkDevice.find(device);
2345 
2346     if (deviceIt == info_VkDevice.end()) {
2347         return VK_ERROR_INITIALIZATION_FAILED;
2348     }
2349 
2350     auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2351 
2352     if (semaphoreIt == info_VkSemaphore.end()) {
2353         return VK_ERROR_INITIALIZATION_FAILED;
2354     }
2355 
2356     auto& info = semaphoreIt->second;
2357 
2358     if (info.eventHandle != ZX_HANDLE_INVALID) {
2359         zx_handle_close(info.eventHandle);
2360     }
2361 #if VK_HEADER_VERSION < 174
2362     info.eventHandle = pInfo->handle;
2363 #else   // VK_HEADER_VERSION >= 174
2364     info.eventHandle = pInfo->zirconHandle;
2365 #endif  // VK_HEADER_VERSION < 174
2366     if (info.eventHandle != ZX_HANDLE_INVALID) {
2367         info.eventKoid = getEventKoid(info.eventHandle);
2368     }
2369 
2370     return VK_SUCCESS;
2371 }
2372 
2373 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
2374     void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
2375     uint32_t* pHandle) {
2376     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2377     if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2378 
2379     std::lock_guard<std::recursive_mutex> lock(mLock);
2380 
2381     auto deviceIt = info_VkDevice.find(device);
2382 
2383     if (deviceIt == info_VkDevice.end()) {
2384         return VK_ERROR_INITIALIZATION_FAILED;
2385     }
2386 
2387     auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2388 
2389     if (semaphoreIt == info_VkSemaphore.end()) {
2390         return VK_ERROR_INITIALIZATION_FAILED;
2391     }
2392 
2393     auto& info = semaphoreIt->second;
2394 
2395     if (info.eventHandle == ZX_HANDLE_INVALID) {
2396         return VK_ERROR_INITIALIZATION_FAILED;
2397     }
2398 
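    // Duplicate (rather than transfer) the event so the semaphore keeps its own
    // reference to the payload; the caller owns the returned handle.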
2399     *pHandle = ZX_HANDLE_INVALID;
2400     zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2401     return VK_SUCCESS;
2402 }
2403 
2404 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
2405     void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
2406     const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
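    // Either adopt the client-supplied sysmem token or, if none was provided,
    // allocate a fresh shared collection token from the sysmem allocator.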
2407     fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
2408 
2409     if (pInfo->collectionToken) {
2410         token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
2411             zx::channel(pInfo->collectionToken));
2412     } else {
2413         auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
2414         if (!endpoints.is_ok()) {
2415             mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2416             return VK_ERROR_INITIALIZATION_FAILED;
2417         }
2418 
2419         auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
2420         if (!result.ok()) {
2421             mesa_loge("AllocateSharedCollection failed: %d", result.status());
2422             return VK_ERROR_INITIALIZATION_FAILED;
2423         }
2424         token_client = std::move(endpoints->client);
2425     }
2426 
2427     auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2428     if (!endpoints.is_ok()) {
2429         mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2430         return VK_ERROR_INITIALIZATION_FAILED;
2431     }
2432     auto [collection_client, collection_server] = std::move(endpoints.value());
2433 
2434     auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
2435                                                          std::move(collection_server));
2436     if (!result.ok()) {
2437         mesa_loge("BindSharedCollection failed: %d", result.status());
2438         return VK_ERROR_INITIALIZATION_FAILED;
2439     }
2440 
2441     auto* sysmem_collection =
2442         new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
2443     *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2444 
2445     register_VkBufferCollectionFUCHSIA(*pCollection);
2446     return VK_SUCCESS;
2447 }
2448 
2449 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
2450                                                           VkBufferCollectionFUCHSIA collection,
2451                                                           const VkAllocationCallbacks*) {
2452     auto sysmem_collection =
2453         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2454     if (sysmem_collection) {
2455         (*sysmem_collection)->Close();
2456     }
2457     delete sysmem_collection;
2458 
2459     unregister_VkBufferCollectionFUCHSIA(collection);
2460 }
2461 
2462 SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
2463     VkEncoder* enc, VkDevice device,
2464     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2465     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2466     const auto& collection = *pCollection;
2467     if (!pImageConstraintsInfo ||
2468         pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2469         mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
2470         return {VK_ERROR_INITIALIZATION_FAILED};
2471     }
2472 
2473     if (pImageConstraintsInfo->formatConstraintsCount == 0) {
2474         mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
2475         abort();
2476     }
2477 
2478     fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2479         defaultBufferCollectionConstraints(
2480             /* min_size_bytes */ 0,
2481             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
2482             pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
2483             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
2484             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
2485             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);
2486 
2487     std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;
2488 
2489     VkPhysicalDevice physicalDevice;
2490     {
2491         std::lock_guard<std::recursive_mutex> lock(mLock);
2492         auto deviceIt = info_VkDevice.find(device);
2493         if (deviceIt == info_VkDevice.end()) {
2494             return {VK_ERROR_INITIALIZATION_FAILED};
2495         }
2496         physicalDevice = deviceIt->second.physdev;
2497     }
2498 
2499     std::vector<uint32_t> createInfoIndex;
2500 
2501     bool hasOptimalTiling = false;
2502     for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
2503         const VkImageCreateInfo* createInfo =
2504             &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
2505         const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2506             &pImageConstraintsInfo->pFormatConstraints[i];
2507 
2508         // Add ImageFormatConstraints for *optimal* tiling
2509         VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2510         if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2511             optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
2512                 enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
2513                 &constraints);
2514             if (optimalResult == VK_SUCCESS) {
2515                 createInfoIndex.push_back(i);
2516                 hasOptimalTiling = true;
2517             }
2518         }
2519 
2520         // Add ImageFormatConstraints for *linear* tiling
2521         VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
2522             enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
2523         if (linearResult == VK_SUCCESS) {
2524             createInfoIndex.push_back(i);
2525         }
2526 
2527         // Update usage and BufferMemoryConstraints
2528         if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2529             constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2530 
2531             if (formatConstraints && formatConstraints->flags) {
2532                 mesa_logw(
2533                     "%s: Non-zero flags (%08x) in image format "
2534                     "constraints; this is currently not supported, see "
2535                     "fxbug.dev/42147900.",
2536                     __func__, formatConstraints->flags);
2537             }
2538         }
2539     }
2540 
2541     // Set buffer memory constraints based on optimal/linear tiling support
2542     // and flags.
2543     VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2544     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2545         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2546     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2547         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2548     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2549         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2550     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2551         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2552 
2553     constraints.has_buffer_memory_constraints = true;
2554     auto& memory_constraints = constraints.buffer_memory_constraints;
2555     memory_constraints.cpu_domain_supported = true;
2556     memory_constraints.ram_domain_supported = true;
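    // The inaccessible (device-local only) domain is usable only when at least
    // one optimal-tiling format was accepted and the client did not request any
    // CPU access; otherwise buffers must stay host-mappable.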
2557     memory_constraints.inaccessible_domain_supported =
2558         hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2559                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2560                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2561                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2562 
2563     if (memory_constraints.inaccessible_domain_supported) {
2564         memory_constraints.heap_permitted_count = 2;
2565         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2566         memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2567     } else {
2568         memory_constraints.heap_permitted_count = 1;
2569         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2570     }
2571 
2572     if (constraints.image_format_constraints_count == 0) {
2573         mesa_loge("%s: none of the specified formats is supported by device", __func__);
2574         return {VK_ERROR_FORMAT_NOT_SUPPORTED};
2575     }
2576 
2577     constexpr uint32_t kVulkanPriority = 5;
2578     const char kName[] = "GoldfishSysmemShared";
2579     collection->SetName(kVulkanPriority, fidl::StringView(kName));
2580 
2581     auto result = collection->SetConstraints(true, constraints);
2582     if (!result.ok()) {
2583         mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
2584         return {VK_ERROR_INITIALIZATION_FAILED};
2585     }
2586 
2587     return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
2588 }
2589 
2590 VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
2591     VkEncoder* enc, VkDevice device,
2592     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2593     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2594     const auto& collection = *pCollection;
2595 
2596     auto setConstraintsResult =
2597         setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
2598     if (setConstraintsResult.result != VK_SUCCESS) {
2599         return setConstraintsResult.result;
2600     }
2601 
2602     // copy constraints to info_VkBufferCollectionFUCHSIA if
2603     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2604     std::lock_guard<std::recursive_mutex> lock(mLock);
2605     VkBufferCollectionFUCHSIA buffer_collection =
2606         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2607     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2608         info_VkBufferCollectionFUCHSIA.end()) {
2609         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2610             std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
2611                 std::move(setConstraintsResult.constraints));
2612         info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2613             std::move(setConstraintsResult.createInfoIndex);
2614     }
2615 
2616     return VK_SUCCESS;
2617 }
2618 
2619 VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
2620     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2621     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2622     auto setConstraintsResult =
2623         setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
2624     if (setConstraintsResult.result != VK_SUCCESS) {
2625         return setConstraintsResult.result;
2626     }
2627 
2628     // copy constraints to info_VkBufferCollectionFUCHSIA if
2629     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2630     std::lock_guard<std::recursive_mutex> lock(mLock);
2631     VkBufferCollectionFUCHSIA buffer_collection =
2632         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2633     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2634         info_VkBufferCollectionFUCHSIA.end()) {
2635         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2636             std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
2637                 setConstraintsResult.constraints);
2638     }
2639 
2640     return VK_SUCCESS;
2641 }
2642 
2643 VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2644     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2645     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2646     VkEncoder* enc = (VkEncoder*)context;
2647     auto sysmem_collection =
2648         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2649     return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
2650                                                       pImageConstraintsInfo);
2651 }
2652 
2653 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2654     void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
2655     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2656     auto sysmem_collection =
2657         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2658     return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
2659 }
2660 
2661 VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
2662     VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2663     uint32_t* outCreateInfoIndex) {
2664     if (!info_VkBufferCollectionFUCHSIA[collection].constraints.has_value()) {
2665         mesa_loge("%s: constraints not set", __func__);
2666         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2667     }
2668 
2669     if (!info.settings.has_image_format_constraints) {
2670         // no image format constraints, skip getting createInfoIndex.
2671         return VK_SUCCESS;
2672     }
2673 
2674     const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
2675     const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2676     const auto& out = info.settings.image_format_constraints;
2677     bool foundCreateInfo = false;
2678 
2679     for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count;
2680          imageFormatIndex++) {
2681         const auto& in = constraints.image_format_constraints[imageFormatIndex];
2682         // These checks are sorted in order of how often they're expected to
2683         // mismatch, from most likely to least likely. They aren't always
2684         // equality comparisons, since sysmem may change some values in
2685         // compatible ways on behalf of the other participants.
2686         if ((out.pixel_format.type != in.pixel_format.type) ||
2687             (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
2688             (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
2689             (out.min_bytes_per_row < in.min_bytes_per_row) ||
2690             (out.required_max_coded_width < in.required_max_coded_width) ||
2691             (out.required_max_coded_height < in.required_max_coded_height) ||
2692             (in.bytes_per_row_divisor != 0 &&
2693              out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2694             continue;
2695         }
2696         // Check if the out color spaces are a subset of the in color spaces.
2697         bool all_color_spaces_found = true;
2698         for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2699             bool found_matching_color_space = false;
2700             for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2701                 if (out.color_space[j].type == in.color_space[k].type) {
2702                     found_matching_color_space = true;
2703                     break;
2704                 }
2705             }
2706             if (!found_matching_color_space) {
2707                 all_color_spaces_found = false;
2708                 break;
2709             }
2710         }
2711         if (!all_color_spaces_found) {
2712             continue;
2713         }
2714 
2715         // Choose the first valid format for now.
2716         *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2717         return VK_SUCCESS;
2718     }
2719 
2720     mesa_loge("%s: cannot find a valid image format in constraints", __func__);
2721     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2722 }
2723 
2724 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
2725     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2726     VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2727     VkEncoder* enc = (VkEncoder*)context;
2728     const auto& sysmem_collection =
2729         *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2730 
2731     auto result = sysmem_collection->WaitForBuffersAllocated();
2732     if (!result.ok() || result->status != ZX_OK) {
2733         mesa_loge("Failed wait for allocation: %d %d", result.status(),
2734                   GET_STATUS_SAFE(result, status));
2735         return VK_ERROR_INITIALIZATION_FAILED;
2736     }
2737     fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);
2738 
2739     bool is_host_visible =
2740         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2741     bool is_device_local =
2742         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2743     if (!is_host_visible && !is_device_local) {
2744         mesa_loge("buffer collection uses a non-goldfish heap (type 0x%lx)",
2745                   static_cast<uint64_t>(info.settings.buffer_settings.heap));
2746         return VK_ERROR_INITIALIZATION_FAILED;
2747     }
2748 
2749     // memoryTypeBits
2750     // ====================================================================
2751     {
2752         std::lock_guard<std::recursive_mutex> lock(mLock);
2753         auto deviceIt = info_VkDevice.find(device);
2754         if (deviceIt == info_VkDevice.end()) {
2755             return VK_ERROR_INITIALIZATION_FAILED;
2756         }
2757         auto& deviceInfo = deviceIt->second;
2758 
2759         // Device local memory type supported.
2760         pProperties->memoryTypeBits = 0;
2761         for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2762             if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2763                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2764                 (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2765                                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2766                 pProperties->memoryTypeBits |= 1ull << i;
2767             }
2768         }
2769     }
2770 
2771     // bufferCount
2772     // ====================================================================
2773     pProperties->bufferCount = info.buffer_count;
2774 
2775     auto storeProperties = [this, collection, pProperties]() -> VkResult {
2776         // store properties to storage
2777         std::lock_guard<std::recursive_mutex> lock(mLock);
2778         if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2779             info_VkBufferCollectionFUCHSIA.end()) {
2780             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2781         }
2782 
2783         info_VkBufferCollectionFUCHSIA[collection].properties =
2784             std::make_optional<VkBufferCollectionPropertiesFUCHSIA>(*pProperties);
2785 
2786         // We only do a shallow copy so we should remove all pNext pointers.
2787         info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
2788         info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
2789             nullptr;
2790         return VK_SUCCESS;
2791     };
2792 
2793     // The fields below only apply to buffer collections with image formats.
2794     if (!info.settings.has_image_format_constraints) {
2795         mesa_logd("%s: buffer collection doesn't have image format constraints", __func__);
2796         return storeProperties();
2797     }
2798 
2799     // sysmemPixelFormat
2800     // ====================================================================
2801 
2802     pProperties->sysmemPixelFormat =
2803         static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);
2804 
2805     // colorSpace
2806     // ====================================================================
2807     if (info.settings.image_format_constraints.color_spaces_count == 0) {
2808         mesa_loge(
2809             "%s: color space missing from allocated buffer collection "
2810             "constraints",
2811             __func__);
2812         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2813     }
2814     // Only report first colorspace for now.
2815     pProperties->sysmemColorSpaceIndex.colorSpace =
2816         static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);
2817 
2818     // createInfoIndex
2819     // ====================================================================
2820     {
2821         std::lock_guard<std::recursive_mutex> lock(mLock);
2822         auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2823             collection, info, &pProperties->createInfoIndex);
2824         if (getIndexResult != VK_SUCCESS) {
2825             return getIndexResult;
2826         }
2827     }
2828 
2829     // formatFeatures
2830     // ====================================================================
2831     VkPhysicalDevice physicalDevice;
2832     {
2833         std::lock_guard<std::recursive_mutex> lock(mLock);
2834         auto deviceIt = info_VkDevice.find(device);
2835         if (deviceIt == info_VkDevice.end()) {
2836             return VK_ERROR_INITIALIZATION_FAILED;
2837         }
2838         physicalDevice = deviceIt->second.physdev;
2839     }
2840 
2841     VkFormat vkFormat =
2842         sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
2843     VkFormatProperties formatProperties;
2844     enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
2845                                              true /* do lock */);
2846     if (is_device_local) {
2847         pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
2848     }
2849     if (is_host_visible) {
2850         pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2851     }
2852 
2853     // YCbCr properties
2854     // ====================================================================
2855     // TODO(59804): Implement this correctly when we support YUV pixel
2856     // formats in goldfish ICD.
2857     pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
2858     pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
2859     pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
2860     pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
2861     pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2862     pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2863     pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2864     pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2865 
2866     return storeProperties();
2867 }
2868 #endif
2869 
2870 static uint32_t getVirglFormat(VkFormat vkFormat) {
2871     uint32_t virglFormat = 0;
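    // Map Vulkan formats onto virgl formats by channel layout only; the numeric
    // interpretation (UNORM/SRGB/SINT/...) does not matter for host allocation,
    // so all 8-bit RGBA variants collapse onto the corresponding UNORM format.
    // Unhandled formats return 0.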
2872 
2873     switch (vkFormat) {
2874         case VK_FORMAT_R8G8B8A8_SINT:
2875         case VK_FORMAT_R8G8B8A8_UNORM:
2876         case VK_FORMAT_R8G8B8A8_SRGB:
2877         case VK_FORMAT_R8G8B8A8_SNORM:
2878         case VK_FORMAT_R8G8B8A8_SSCALED:
2879         case VK_FORMAT_R8G8B8A8_USCALED:
2880             virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2881             break;
2882         case VK_FORMAT_B8G8R8A8_SINT:
2883         case VK_FORMAT_B8G8R8A8_UNORM:
2884         case VK_FORMAT_B8G8R8A8_SRGB:
2885         case VK_FORMAT_B8G8R8A8_SNORM:
2886         case VK_FORMAT_B8G8R8A8_SSCALED:
2887         case VK_FORMAT_B8G8R8A8_USCALED:
2888             virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2889             break;
2890         case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
2891             virglFormat = VIRGL_FORMAT_R10G10B10A2_UNORM;
2892             break;
2893         default:
2894             break;
2895     }
2896 
2897     return virglFormat;
2898 }
2899 
2900 CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2901     VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2902     VkEncoder* enc, VkResult& res) {
2903     CoherentMemoryPtr coherentMemory = nullptr;
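    // Two ways to obtain a guest mapping of host coherent memory: on Android
    // with direct mem support, map the host allocation into the goldfish
    // address space; otherwise, with virtio-gpu next, expose it as a mappable
    // host3d blob and map that.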
2904 
2905 #if defined(__ANDROID__)
2906     if (mFeatureInfo.hasDirectMem) {
2907         uint64_t gpuAddr = 0;
2908         GoldfishAddressSpaceBlockPtr block = nullptr;
2909         res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2910         if (res != VK_SUCCESS) {
2911             mesa_loge(
2912                 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2913                 "returned:%d.",
2914                 res);
2915             return coherentMemory;
2916         }
2917         {
2918             std::lock_guard<std::recursive_mutex> lock(mLock);
2919             auto it = info_VkDeviceMemory.find(mem);
2920             if (it == info_VkDeviceMemory.end()) {
2921                 mesa_loge("Failed to create coherent memory: failed to find device memory.");
2922                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2923                 return coherentMemory;
2924             }
2925             auto& info = it->second;
2926             block = info.goldfishBlock;
2927             info.goldfishBlock = nullptr;
2928 
2929             coherentMemory = std::make_shared<CoherentMemory>(
2930                 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2931         }
2932     } else
2933 #endif  // defined(__ANDROID__)
2934         if (mFeatureInfo.hasVirtioGpuNext) {
2935             struct VirtGpuCreateBlob createBlob = {0};
2936             uint64_t hvaSizeId[3];
2937             res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2938                                                         &hvaSizeId[2], true /* do lock */);
2939             if (res != VK_SUCCESS) {
2940                 mesa_loge(
2941                     "Failed to create coherent memory: vkGetMemoryHostAddressInfoGOOGLE "
2942                     "returned:%d.",
2943                     res);
2944                 return coherentMemory;
2945             }
2946             {
2947                 std::lock_guard<std::recursive_mutex> lock(mLock);
2948                 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2949                 createBlob.blobMem = kBlobMemHost3d;
2950                 createBlob.flags = kBlobFlagMappable;
2951                 createBlob.blobId = hvaSizeId[2];
2952                 createBlob.size = hostAllocationInfo.allocationSize;
2953 
2954                 auto blob = instance->createBlob(createBlob);
2955                 if (!blob) {
2956                     mesa_loge("Failed to create coherent memory: failed to create blob.");
2957                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2958                     return coherentMemory;
2959                 }
2960 
2961                 VirtGpuResourceMappingPtr mapping = blob->createMapping();
2962                 if (!mapping) {
2963                     mesa_loge("Failed to create coherent memory: failed to create blob mapping.");
2964                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2965                     return coherentMemory;
2966                 }
2967 
2968                 coherentMemory =
2969                     std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2970             }
2971         } else {
2972             mesa_loge("FATAL: Unsupported virtual memory feature");
2973             abort();
2974         }
2975     return coherentMemory;
2976 }
2977 
2978 VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
2979                                                  const VkMemoryAllocateInfo* pAllocateInfo,
2980                                                  VkEncoder* enc, VkDeviceMemory* pMemory) {
2981     uint64_t blobId = 0;
2982     uint64_t offset = 0;
2983     uint8_t* ptr = nullptr;
2984     VkMemoryAllocateFlagsInfo allocFlagsInfo;
2985     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
2986     VkCreateBlobGOOGLE createBlobInfo;
2987     VirtGpuResourcePtr guestBlob = nullptr;
2988 
2989     memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
2990     createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
2991 
2992     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
2993         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
2994     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
2995         vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
2996 
2997     bool deviceAddressMemoryAllocation =
2998         allocFlagsInfoPtr &&
2999         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3000          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3001 
3002     bool dedicated = deviceAddressMemoryAllocation;
3003 
3004     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3005         dedicated = true;
3006 
3007     VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3008     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3009 
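    // Pick the host allocation size: blob-backed paths round up to the blob
    // alignment, dedicated allocations round up to the largest page size, and
    // everything else is carved out of shared blocks of at least
    // kDefaultHostMemBlockSize so small allocations can be sub-allocated.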
3010     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3011         hostAllocationInfo.allocationSize =
3012             ALIGN_POT(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3013     } else if (dedicated) {
3014         // Over-align to kLargestPageSize to work around some Windows drivers
3015         // (b:152769369). The host could likely report the desired alignment instead.
3016         hostAllocationInfo.allocationSize =
3017             ALIGN_POT(pAllocateInfo->allocationSize, kLargestPageSize);
3018     } else {
3019         VkDeviceSize roundedUpAllocSize = ALIGN_POT(pAllocateInfo->allocationSize, kMegaByte);
3020         hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
3021     }
3022 
3023     // Support device address capture/replay allocations
3024     if (deviceAddressMemoryAllocation) {
3025         if (allocFlagsInfoPtr) {
3026             mesa_logd("%s: has alloc flags\n", __func__);
3027             allocFlagsInfo = *allocFlagsInfoPtr;
3028             vk_append_struct(&structChainIter, &allocFlagsInfo);
3029         }
3030 
3031         if (opaqueCaptureAddressAllocInfoPtr) {
3032             mesa_logd("%s: has opaque capture address\n", __func__);
3033             opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3034             vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3035         }
3036     }
3037 
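    // Guest-handle path: create the guest blob first, then submit a placeholder
    // command and wait on it so the host has processed the blob creation before
    // vkAllocateMemory references its blobId.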
3038     if (mCaps.params[kParamCreateGuestHandle]) {
3039         struct VirtGpuCreateBlob createBlob = {0};
3040         struct VirtGpuExecBuffer exec = {};
3041         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3042         struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3043 
3044         createBlobInfo.blobId = ++mAtomicId;
3045         createBlobInfo.blobMem = kBlobMemGuest;
3046         createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3047         vk_append_struct(&structChainIter, &createBlobInfo);
3048 
3049         createBlob.blobMem = kBlobMemGuest;
3050         createBlob.flags = kBlobFlagCreateGuestHandle;
3051         createBlob.blobId = createBlobInfo.blobId;
3052         createBlob.size = hostAllocationInfo.allocationSize;
3053 
3054         guestBlob = instance->createBlob(createBlob);
3055         if (!guestBlob) {
3056             mesa_loge("Failed to allocate coherent memory: failed to create blob.");
3057             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3058         }
3059 
3060         placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3061         exec.command = static_cast<void*>(&placeholderCmd);
3062         exec.command_size = sizeof(placeholderCmd);
3063         exec.flags = kRingIdx;
3064         exec.ring_idx = 1;
3065         if (instance->execBuffer(exec, guestBlob.get())) {
3066             mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3067             return VK_ERROR_OUT_OF_HOST_MEMORY;
3068         }
3069 
3070         guestBlob->wait();
3071     } else if (mCaps.vulkanCapset.deferredMapping) {
3072         createBlobInfo.blobId = ++mAtomicId;
3073         createBlobInfo.blobMem = kBlobMemHost3d;
3074         vk_append_struct(&structChainIter, &createBlobInfo);
3075     }
3076 
3077     VkDeviceMemory mem = VK_NULL_HANDLE;
3078     VkResult host_res =
3079         enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3080     if (host_res != VK_SUCCESS) {
3081         mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.",
3082                   host_res);
3083         return host_res;
3084     }
3085 
3086     struct VkDeviceMemory_Info info;
3087     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3088         info.allocationSize = pAllocateInfo->allocationSize;
3089         info.blobId = createBlobInfo.blobId;
3090     }
3091 
3092     if (guestBlob) {
3093         auto mapping = guestBlob->createMapping();
3094         if (!mapping) {
3095             mesa_loge("Failed to allocate coherent memory: failed to create blob mapping.");
3096             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3097         }
3098 
3099         auto coherentMemory = std::make_shared<CoherentMemory>(
3100             mapping, hostAllocationInfo.allocationSize, device, mem);
3101 
3102         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3103         info.coherentMemoryOffset = offset;
3104         info.coherentMemory = coherentMemory;
3105         info.ptr = ptr;
3106     }
3107 
3108     info.coherentMemorySize = hostAllocationInfo.allocationSize;
3109     info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3110     info.device = device;
3111     info.dedicated = dedicated;
3112     {
3113         // createCoherentMemory needs to access the info_VkDeviceMemory entry,
3114         // so set it before use.
3115         std::lock_guard<std::recursive_mutex> lock(mLock);
3116         info_VkDeviceMemory[mem] = info;
3117     }
3118 
3119     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3120         *pMemory = mem;
3121         return host_res;
3122     }
3123 
3124     auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3125     if (coherentMemory) {
3126         std::lock_guard<std::recursive_mutex> lock(mLock);
3127         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3128         info.allocationSize = pAllocateInfo->allocationSize;
3129         info.coherentMemoryOffset = offset;
3130         info.coherentMemory = coherentMemory;
3131         info.ptr = ptr;
3132         info_VkDeviceMemory[mem] = info;
3133         *pMemory = mem;
3134     } else {
3135         enc->vkFreeMemory(device, mem, nullptr, true);
3136         std::lock_guard<std::recursive_mutex> lock(mLock);
3137         info_VkDeviceMemory.erase(mem);
3138     }
3139     return host_res;
3140 }
3141 
3142 VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3143                                             VkEncoder* enc, VkDevice device,
3144                                             VkDeviceMemory* pMemory) {
3145     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3146     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3147 
3148     // Add buffer device address capture structs
3149     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3150         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3151 
3152     bool dedicated =
3153         allocFlagsInfoPtr &&
3154         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3155          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3156 
3157     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3158         dedicated = true;
3159 
3160     CoherentMemoryPtr coherentMemory = nullptr;
3161     uint8_t* ptr = nullptr;
3162     uint64_t offset = 0;
3163     {
3164         std::lock_guard<std::recursive_mutex> lock(mLock);
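        // First try to sub-allocate from an existing coherent block on the same
        // device and memory type; fall back to a fresh host allocation below.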
3165         for (const auto& [memory, info] : info_VkDeviceMemory) {
3166             if (info.device != device) continue;
3167 
3168             if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3169 
3170             if (info.dedicated || dedicated) continue;
3171 
3172             if (!info.coherentMemory) continue;
3173 
3174             if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3175                 continue;
3176 
3177             coherentMemory = info.coherentMemory;
3178             break;
3179         }
3180         if (coherentMemory) {
3181             struct VkDeviceMemory_Info info;
3182             info.coherentMemoryOffset = offset;
3183             info.ptr = ptr;
3184             info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3185             info.allocationSize = pAllocateInfo->allocationSize;
3186             info.coherentMemory = coherentMemory;
3187             info.device = device;
3188 
3189             // For suballocated memory, create an alias VkDeviceMemory handle for the
3190             // application; the memory backing the suballocation is still the
3191             // VkDeviceMemory associated with the CoherentMemory.
3192             auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3193             info_VkDeviceMemory[mem] = info;
3194             *pMemory = mem;
3195             return VK_SUCCESS;
3196         }
3197     }
3198     return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3199 }
3200 
3201 VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3202                                               const VkMemoryAllocateInfo* pAllocateInfo,
3203                                               const VkAllocationCallbacks* pAllocator,
3204                                               VkDeviceMemory* pMemory) {
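// On failure, emit a VK_EXT_device_memory_report ALLOCATION_FAILED event for
// the owning device before propagating the error result.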
3205 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result)                                      \
3206     {                                                                                          \
3207         auto it = info_VkDevice.find(device);                                                  \
3208         if (it == info_VkDevice.end()) return result;                                          \
3209         emitDeviceMemoryReport(it->second,                                                     \
3210                                VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0,    \
3211                                pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3212                                pAllocateInfo->memoryTypeIndex);                                \
3213         return result;                                                                         \
3214     }
3215 
3216     if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3217 
3218     VkEncoder* enc = (VkEncoder*)context;
3219 
3220     bool hasDedicatedImage = false;
3221     bool hasDedicatedBuffer = false;
3222 
3223     VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3224     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3225 
3226     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3227     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3228 
3229     // Add buffer device address capture structs
3230     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3231         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3232     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3233         vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3234 
3235     if (allocFlagsInfoPtr) {
3236         mesa_logd("%s: has alloc flags\n", __func__);
3237         allocFlagsInfo = *allocFlagsInfoPtr;
3238         vk_append_struct(&structChainIter, &allocFlagsInfo);
3239     }
3240 
3241     if (opaqueCaptureAddressAllocInfoPtr) {
3242         mesa_logd("%s: has opaque capture address\n", __func__);
3243         opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3244         vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3245     }
3246 
3247     VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3248     VkImportColorBufferGOOGLE importCbInfo = {
3249         VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3250         0,
3251     };
3252     VkImportBufferGOOGLE importBufferInfo = {
3253         VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3254         0,
3255     };
3256     // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3257     //     VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3258     // };
3259 
3260     const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3261         vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
3262 
3263 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3264     const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3265         vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
3266     // Even if we export allocate, the underlying operation
3267     // for the host is always going to be an import operation.
3268     // This is also how Intel's implementation works,
3269     // and is generally simpler;
3270     // even in an export allocation,
3271     // we perform AHardwareBuffer allocation
3272     // on the guest side, at this layer,
3273     // and then we attach a new VkDeviceMemory
3274     // to the AHardwareBuffer on the host via an "import" operation.
3275     AHardwareBuffer* ahw = nullptr;
3276 #else
3277     const void* importAhbInfoPtr = nullptr;
3278     void* ahw = nullptr;
3279 #endif
3280 
3281 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3282     const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3283         vk_find_struct<VkImportMemoryFdInfoKHR>(pAllocateInfo);
3284 #else
3285     const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3286 #endif
3287 
3288 #ifdef VK_USE_PLATFORM_FUCHSIA
3289     const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3290         vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
3291 
3292     const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3293         vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
3294 #else
3295     const void* importBufferCollectionInfoPtr = nullptr;
3296     const void* importVmoInfoPtr = nullptr;
3297 #endif  // VK_USE_PLATFORM_FUCHSIA
3298 
3299     const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3300         vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
3301 
3302     // Note for AHardwareBuffers, the Vulkan spec states:
3303     //
3304     //     Android hardware buffers have intrinsic width, height, format, and usage
3305     //     properties, so Vulkan images bound to memory imported from an Android
3306     //     hardware buffer must use dedicated allocations
3307     //
3308     // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3309     // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3310     // may or may not actually use a dedicated allocation to emulate
3311     // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3312     // host and the host will decide whether or not to use it.
3313 
3314     bool shouldPassThroughDedicatedAllocInfo =
3315         !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3316 
3317     const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3318         getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3319 
3320     const bool requestedMemoryIsHostVisible =
3321         isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3322 
3323 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
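    // Host-visible allocations on these platforms are sub-allocated from larger
    // coherent blocks in the guest, so the host must not treat them as
    // dedicated allocations.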
3324     shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3325 #endif  // defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3326 
3327     if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3328         dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3329         vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3330     }
3331 
3332     // State needed for import/export.
3333     bool exportAhb = false;
3334     bool exportVmo = false;
3335     bool exportDmabuf = false;
3336     bool importAhb = false;
3337     bool importBufferCollection = false;
3338     bool importVmo = false;
3339     bool importDmabuf = false;
3340     (void)exportVmo;
3341 
3342     if (exportAllocateInfoPtr) {
3343         exportAhb = exportAllocateInfoPtr->handleTypes &
3344                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3345 #ifdef VK_USE_PLATFORM_FUCHSIA
3346         exportVmo = exportAllocateInfoPtr->handleTypes &
3347                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3348 #endif  // VK_USE_PLATFORM_FUCHSIA
3349         exportDmabuf =
3350             exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3351                                                   VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3352     } else if (importAhbInfoPtr) {
3353         importAhb = true;
3354     } else if (importBufferCollectionInfoPtr) {
3355         importBufferCollection = true;
3356     } else if (importVmoInfoPtr) {
3357         importVmo = true;
3358     }
3359 
3360     if (importFdInfoPtr) {
3361         importDmabuf =
3362             (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3363                                             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3364     }
3365     bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
3366 
3367 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3368     if (exportAhb) {
3369         hasDedicatedImage =
3370             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3371         hasDedicatedBuffer =
3372             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3373         VkExtent3D imageExtent = {0, 0, 0};
3374         uint32_t imageLayers = 0;
3375         VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3376         VkImageUsageFlags imageUsage = 0;
3377         VkImageCreateFlags imageCreateFlags = 0;
3378         VkDeviceSize bufferSize = 0;
3379         VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
3380 
3381         if (hasDedicatedImage) {
3382             std::lock_guard<std::recursive_mutex> lock(mLock);
3383 
3384             auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3385             if (it == info_VkImage.end())
3386                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3387             const auto& info = it->second;
3388             const auto& imgCi = info.createInfo;
3389 
3390             imageExtent = imgCi.extent;
3391             imageLayers = imgCi.arrayLayers;
3392             imageFormat = imgCi.format;
3393             imageUsage = imgCi.usage;
3394             imageCreateFlags = imgCi.flags;
3395         }
3396 
3397         if (hasDedicatedBuffer) {
3398             std::lock_guard<std::recursive_mutex> lock(mLock);
3399 
3400             auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3401             if (it == info_VkBuffer.end())
3402                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3403             const auto& info = it->second;
3404             const auto& bufCi = info.createInfo;
3405 
3406             bufferSize = bufCi.size;
3407         }
3408 
3409         VkResult ahbCreateRes = createAndroidHardwareBuffer(
3410             mGralloc.get(), hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers,
3411             imageFormat, imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
3412 
3413         if (ahbCreateRes != VK_SUCCESS) {
3414             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3415         }
3416     }
3417 
3418     if (importAhb) {
3419         ahw = importAhbInfoPtr->buffer;
3420         // We still need to acquire the AHardwareBuffer.
3421         importAndroidHardwareBuffer(mGralloc.get(), importAhbInfoPtr, nullptr);
3422     }
3423 
3424     if (ahw) {
3425         const uint32_t hostHandle = mGralloc->getHostHandle(ahw);
3426         if (mGralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3427             !mGralloc->treatBlobAsImage()) {
3428             importBufferInfo.buffer = hostHandle;
3429             vk_append_struct(&structChainIter, &importBufferInfo);
3430         } else {
3431             importCbInfo.colorBuffer = hostHandle;
3432             vk_append_struct(&structChainIter, &importCbInfo);
3433         }
3434     }
3435 #endif
3436     zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3437 
3438 #ifdef VK_USE_PLATFORM_FUCHSIA
3439     if (importBufferCollection) {
3440         const auto& collection =
3441             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3442                 importBufferCollectionInfoPtr->collection);
3443         auto result = collection->WaitForBuffersAllocated();
3444         if (!result.ok() || result->status != ZX_OK) {
3445             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3446                       GET_STATUS_SAFE(result, status));
3447             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3448         }
3449         fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3450         uint32_t index = importBufferCollectionInfoPtr->index;
3451         if (index >= info.buffer_count) {
3452             mesa_loge("Invalid buffer index: %u", index);
3453             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3454         }
3455         vmo_handle = info.buffers[index].vmo.release();
3456     }
3457 
3458     if (importVmo) {
3459         vmo_handle = importVmoInfoPtr->handle;
3460     }
3461 
3462     if (exportVmo) {
3463         hasDedicatedImage =
3464             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3465         hasDedicatedBuffer =
3466             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3467 
3468         if (hasDedicatedImage && hasDedicatedBuffer) {
3469             mesa_loge(
3470                 "Invalid VkMemoryDedicatedAllocateInfo: At least one "
3471                 "of image and buffer must be VK_NULL_HANDLE.");
3472             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3473         }
3474 
3475         const VkImageCreateInfo* pImageCreateInfo = nullptr;
3476 
3477         VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3478             .sType = VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3479             .pNext = nullptr,
3480             .createInfo = {},
3481             .requiredFormatFeatures = 0,
3482             .bufferCollectionConstraints =
3483                 VkBufferCollectionConstraintsInfoFUCHSIA{
3484                     .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3485                     .pNext = nullptr,
3486                     .minBufferCount = 1,
3487                     .maxBufferCount = 0,
3488                     .minBufferCountForCamping = 0,
3489                     .minBufferCountForDedicatedSlack = 0,
3490                     .minBufferCountForSharedSlack = 0,
3491                 },
3492         };
3493         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;
3494 
3495         if (hasDedicatedImage) {
3496             std::lock_guard<std::recursive_mutex> lock(mLock);
3497 
3498             auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3499             if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3500             const auto& imageInfo = it->second;
3501 
3502             pImageCreateInfo = &imageInfo.createInfo;
3503         }
3504 
3505         if (hasDedicatedBuffer) {
3506             std::lock_guard<std::recursive_mutex> lock(mLock);
3507 
3508             auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3509             if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
3510             const auto& bufferInfo = it->second;
3511 
3512             bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3513             pBufferConstraintsInfo = &bufferConstraintsInfo;
3514         }
3515 
3516         hasDedicatedImage =
3517             hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
3518         hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
3519                                                        pBufferConstraintsInfo);
3520 
3521         if (hasDedicatedImage || hasDedicatedBuffer) {
3522             auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3523             if (!token_ends.is_ok()) {
3524                 mesa_loge("zx_channel_create failed: %d", token_ends.status_value());
3525                 abort();
3526             }
3527 
3528             {
3529                 auto result =
3530                     mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
3531                 if (!result.ok()) {
3532                     mesa_loge("AllocateSharedCollection failed: %d", result.status());
3533                     abort();
3534                 }
3535             }
3536 
3537             auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3538             if (!collection_ends.is_ok()) {
3539                 mesa_loge("zx_channel_create failed: %d", collection_ends.status_value());
3540                 abort();
3541             }
3542 
3543             {
3544                 auto result = mSysmemAllocator->BindSharedCollection(
3545                     std::move(token_ends->client), std::move(collection_ends->server));
3546                 if (!result.ok()) {
3547                     mesa_loge("BindSharedCollection failed: %d", result.status());
3548                     abort();
3549                 }
3550             }
3551 
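            // Recap of the sysmem handshake performed above (a sketch based on the
            // calls in this block, not authoritative FIDL documentation):
            //   CreateEndpoints<BufferCollectionToken>() -> token channel pair
            //   AllocateSharedCollection(token.server)   -> registers the token
            //   CreateEndpoints<BufferCollection>()      -> collection channel pair
            //   BindSharedCollection(token.client, collection.server)
            // The client end wrapped below is then used to set constraints and
            // wait for buffer allocation.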
3552             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3553                 std::move(collection_ends->client));
3554             if (hasDedicatedImage) {
3555                 // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA.
3556                 VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
3557                                                                      pImageCreateInfo);
3558                 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3559                     mesa_loge("setBufferCollectionConstraints failed: format %u is not supported",
3560                               pImageCreateInfo->format);
3561                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3562                 }
3563                 if (res != VK_SUCCESS) {
3564                     mesa_loge("setBufferCollectionConstraints failed: %d", res);
3565                     abort();
3566                 }
3567             }
3568 
3569             if (hasDedicatedBuffer) {
3570                 VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
3571                                                                            pBufferConstraintsInfo);
3572                 if (res != VK_SUCCESS) {
3573                     mesa_loge("setBufferCollectionBufferConstraints failed: %d", res);
3574                     abort();
3575                 }
3576             }
3577 
3578             {
3579                 auto result = collection->WaitForBuffersAllocated();
3580                 if (result.ok() && result->status == ZX_OK) {
3581                     fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3582                         result->buffer_collection_info;
3583                     if (!info.buffer_count) {
3584                         mesa_loge(
3585                             "WaitForBuffersAllocated returned "
3586                             "invalid count: %d",
3587                             info.buffer_count);
3588                         abort();
3589                     }
3590                     vmo_handle = info.buffers[0].vmo.release();
3591                 } else {
3592                     mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3593                               GET_STATUS_SAFE(result, status));
3594                     abort();
3595                 }
3596             }
3597 
3598             collection->Close();
3599 
3600             zx::vmo vmo_copy;
3601             zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3602                                                      vmo_copy.reset_and_get_address());
3603             if (status != ZX_OK) {
3604                 mesa_loge("Failed to duplicate VMO: %d", status);
3605                 abort();
3606             }
3607 
3608             if (pImageCreateInfo) {
3609                 // Only device-local images need a color buffer created here;
3610                 // for host-visible images, the color buffer was already created
3611                 // when sysmem allocated the memory. The |tiling| field of the
3612                 // image create info determines whether the image is backed by
3613                 // host-visible memory.
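                // Illustrative mapping assumed by this heuristic (a sketch, not
                // an exhaustive rule):
                //   VK_IMAGE_TILING_LINEAR  -> sysmem host-visible heap; color
                //                              buffer already exists, skip.
                //   VK_IMAGE_TILING_OPTIMAL -> device-local heap; create the
                //                              color buffer below.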
3614                 bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3615                 if (!isLinear) {
3616                     fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3617                     switch (pImageCreateInfo->format) {
3618                         case VK_FORMAT_B8G8R8A8_SINT:
3619                         case VK_FORMAT_B8G8R8A8_UNORM:
3620                         case VK_FORMAT_B8G8R8A8_SRGB:
3621                         case VK_FORMAT_B8G8R8A8_SNORM:
3622                         case VK_FORMAT_B8G8R8A8_SSCALED:
3623                         case VK_FORMAT_B8G8R8A8_USCALED:
3624                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
3625                             break;
3626                         case VK_FORMAT_R8G8B8A8_SINT:
3627                         case VK_FORMAT_R8G8B8A8_UNORM:
3628                         case VK_FORMAT_R8G8B8A8_SRGB:
3629                         case VK_FORMAT_R8G8B8A8_SNORM:
3630                         case VK_FORMAT_R8G8B8A8_SSCALED:
3631                         case VK_FORMAT_R8G8B8A8_USCALED:
3632                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
3633                             break;
3634                         case VK_FORMAT_R8_UNORM:
3635                         case VK_FORMAT_R8_UINT:
3636                         case VK_FORMAT_R8_USCALED:
3637                         case VK_FORMAT_R8_SNORM:
3638                         case VK_FORMAT_R8_SINT:
3639                         case VK_FORMAT_R8_SSCALED:
3640                         case VK_FORMAT_R8_SRGB:
3641                             format =
3642                                 fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
3643                             break;
3644                         case VK_FORMAT_R8G8_UNORM:
3645                         case VK_FORMAT_R8G8_UINT:
3646                         case VK_FORMAT_R8G8_USCALED:
3647                         case VK_FORMAT_R8G8_SNORM:
3648                         case VK_FORMAT_R8G8_SINT:
3649                         case VK_FORMAT_R8G8_SSCALED:
3650                         case VK_FORMAT_R8G8_SRGB:
3651                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3652                             break;
3653                         default:
3654                             mesa_loge("Unsupported format: %d", pImageCreateInfo->format);
3655                             abort();
3656                     }
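                    // The switch above collapses every numeric variant of a channel
                    // layout (UNORM/SNORM/SRGB/etc.) into a single goldfish color
                    // buffer format: BGRA, RGBA, R (as luminance), or RG. Any other
                    // layout is fatal because the control device has no matching
                    // color buffer format.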
3657 
3658                     fidl::Arena arena;
3659                     fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
3660                     createParams.set_width(pImageCreateInfo->extent.width)
3661                         .set_height(pImageCreateInfo->extent.height)
3662                         .set_format(format)
3663                         .set_memory_property(
3664                             fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3665 
3666                     auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3667                                                                      std::move(createParams));
3668                     if (!result.ok() || result->res != ZX_OK) {
3669                         if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
3670                             mesa_logd(
3671                                 "CreateColorBuffer: color buffer already "
3672                                 "exists\n");
3673                         } else {
3674                             mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
3675                                       GET_STATUS_SAFE(result, res));
3676                             abort();
3677                         }
3678                     }
3679                 }
3680             }
3681 
3682             if (pBufferConstraintsInfo) {
3683                 fidl::Arena arena;
3684                 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3685                 createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
3686                     .set_memory_property(
3687                         fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3688 
3689                 auto result =
3690                     mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3691                 if (!result.ok() || result->is_error()) {
3692                     mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
3693                               GET_STATUS_SAFE(result, error_value()));
3694                     abort();
3695                 }
3696             }
3697         } else {
3698             mesa_logw(
3699                 "Dedicated image / buffer not available. Cannot create "
3700                 "BufferCollection to export VMOs.");
3701             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3702         }
3703     }
3704 
3705     if (vmo_handle != ZX_HANDLE_INVALID) {
3706         zx::vmo vmo_copy;
3707         zx_status_t status =
3708             zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
3709         if (status != ZX_OK) {
3710             mesa_loge("Failed to duplicate VMO: %d", status);
3711             abort();
3712         }
3714 
3715         auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3716         if (!result.ok() || result->res != ZX_OK) {
3717             mesa_loge("GetBufferHandle failed: %d:%d", result.status(),
3718                       GET_STATUS_SAFE(result, res));
3719         } else {
3720             fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
3721             uint32_t buffer_handle = result->id;
3722 
3723             if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
3724                 importBufferInfo.buffer = buffer_handle;
3725                 vk_append_struct(&structChainIter, &importBufferInfo);
3726             } else {
3727                 importCbInfo.colorBuffer = buffer_handle;
3728                 vk_append_struct(&structChainIter, &importCbInfo);
3729             }
3730         }
3731     }
3732 #endif
3733 
3734     VirtGpuResourcePtr bufferBlob = nullptr;
3735 #if defined(LINUX_GUEST_BUILD)
3736     if (exportDmabuf) {
3737         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3738         hasDedicatedImage =
3739             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3740         hasDedicatedBuffer =
3741             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3742 
3743         if (hasDedicatedImage) {
3744             VkImageCreateInfo imageCreateInfo;
3745             bool isDmaBufImage = false;
3746             {
3747                 std::lock_guard<std::recursive_mutex> lock(mLock);
3748 
3749                 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3750                 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3751                 const auto& imageInfo = it->second;
3752 
3753                 imageCreateInfo = imageInfo.createInfo;
3754                 isDmaBufImage = imageInfo.isDmaBufImage;
3755             }
3756 
3757             if (isDmaBufImage) {
3758                 const VkImageSubresource imageSubresource = {
3759                     .aspectMask = exportAllocateInfoPtr->handleTypes &
3760                                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
3761                                       ? VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT
3762                                       : VK_IMAGE_ASPECT_COLOR_BIT,
3763                     .mipLevel = 0,
3764                     .arrayLayer = 0,
3765                 };
3766                 VkSubresourceLayout subResourceLayout;
3767                 on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image,
3768                                                &imageSubresource, &subResourceLayout);
3769                 if (!subResourceLayout.rowPitch) {
3770                     mesa_loge("Failed to query stride for VirtGpu resource creation.");
3771                     return VK_ERROR_INITIALIZATION_FAILED;
3772                 }
3773 
3774                 uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
3775                 if (!virglFormat) {
3776                     mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x",
3777                               imageCreateInfo.format);
3778                     return VK_ERROR_FORMAT_NOT_SUPPORTED;
3779                 }
3780                 const uint32_t target = PIPE_TEXTURE_2D;
3781                 uint32_t bind = VIRGL_BIND_RENDER_TARGET;
3782                 if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
3783                     bind |= VIRGL_BIND_LINEAR;
3784                 }
3785 
3786                 if (mCaps.vulkanCapset.alwaysBlob) {
3787                     struct gfxstreamResourceCreate3d create3d = {};
3788                     struct VirtGpuExecBuffer exec = {};
3789                     struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3790                     struct VirtGpuCreateBlob createBlob = {};
3791 
3792                     create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3793                     create3d.bind = bind;
3794                     create3d.target = target;
3795                     create3d.format = virglFormat;
3796                     create3d.width = imageCreateInfo.extent.width;
3797                     create3d.height = imageCreateInfo.extent.height;
3798                     create3d.blobId = ++mAtomicId;
3799 
3800                     createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3801                     createBlob.blobCmdSize = sizeof(create3d);
3802                     createBlob.blobMem = kBlobMemHost3d;
3803                     createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3804                     createBlob.blobId = create3d.blobId;
3805                     createBlob.size = finalAllocInfo.allocationSize;
3806 
3807                     bufferBlob = instance->createBlob(createBlob);
3808                     if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3809 
3810                     placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
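                    // Hedged note on the next few lines: the placeholder command is
                    // assumed to act purely as a fence here. Submitting it on ring 1
                    // and then waiting on the blob ensures the host has finished
                    // creating the blob-backed resource before the allocation is
                    // reported as complete.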
3811                     exec.command = static_cast<void*>(&placeholderCmd);
3812                     exec.command_size = sizeof(placeholderCmd);
3813                     exec.flags = kRingIdx;
3814                     exec.ring_idx = 1;
3815                     if (instance->execBuffer(exec, bufferBlob.get())) {
3816                         mesa_loge("Failed to execbuffer placeholder command.");
3817                         return VK_ERROR_OUT_OF_HOST_MEMORY;
3818                     }
3819 
3820                     if (bufferBlob->wait()) {
3821                         mesa_loge("Failed to wait for blob.");
3822                         return VK_ERROR_OUT_OF_HOST_MEMORY;
3823                     }
3824                 } else {
3825                     bufferBlob = instance->createResource(
3826                         imageCreateInfo.extent.width, imageCreateInfo.extent.height,
3827                         subResourceLayout.rowPitch,
3828                         subResourceLayout.rowPitch * imageCreateInfo.extent.height, virglFormat,
3829                         target, bind);
3830                     if (!bufferBlob) {
3831                         mesa_loge("Failed to create colorBuffer resource for Image memory");
3832                         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3833                     }
3834                     if (bufferBlob->wait()) {
3835                         mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3836                         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3837                     }
3838                 }
3839             } else {
3840                 mesa_logw(
3841                     "The VkMemoryDedicatedAllocateInfo::image associated with VkDeviceMemory "
3842                     "allocation cannot be used to create exportable resource "
3843                     "(VkExportMemoryAllocateInfo).\n");
3844             }
3845         } else if (hasDedicatedBuffer) {
3846             uint32_t virglFormat = VIRGL_FORMAT_R8_UNORM;
3847             const uint32_t target = PIPE_BUFFER;
3848             uint32_t bind = VIRGL_BIND_LINEAR;
3849             uint32_t width = finalAllocInfo.allocationSize;
3850             uint32_t height = 1;
3851 
3852             if (mCaps.vulkanCapset.alwaysBlob) {
3853                 struct gfxstreamResourceCreate3d create3d = {};
3854                 struct VirtGpuExecBuffer exec = {};
3855                 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3856                 struct VirtGpuCreateBlob createBlob = {};
3857 
3858                 create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3859                 create3d.bind = bind;
3860                 create3d.target = target;
3861                 create3d.format = virglFormat;
3862                 create3d.width = width;
3863                 create3d.height = height;
3864                 create3d.blobId = ++mAtomicId;
3865 
3866                 createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3867                 createBlob.blobCmdSize = sizeof(create3d);
3868                 createBlob.blobMem = kBlobMemHost3d;
3869                 createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3870                 createBlob.blobId = create3d.blobId;
3871                 createBlob.size = width;
3872 
3873                 bufferBlob = instance->createBlob(createBlob);
3874                 if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3875 
3876                 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3877                 exec.command = static_cast<void*>(&placeholderCmd);
3878                 exec.command_size = sizeof(placeholderCmd);
3879                 exec.flags = kRingIdx;
3880                 exec.ring_idx = 1;
3881                 if (instance->execBuffer(exec, bufferBlob.get())) {
3882                     mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3883                     return VK_ERROR_OUT_OF_HOST_MEMORY;
3884                 }
3885 
3886                 if (bufferBlob->wait()) {
                    mesa_loge("Failed to wait for blob.");
                    return VK_ERROR_OUT_OF_HOST_MEMORY;
                }
3887             } else {
3888                 bufferBlob = instance->createResource(width, height, width, width * height,
3889                                                       virglFormat, target, bind);
3890                 if (!bufferBlob) {
3891                     mesa_loge("Failed to create colorBuffer resource for Image memory");
3892                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3893                 }
3894                 if (bufferBlob->wait()) {
3895                     mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3896                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3897                 }
3898             }
3899         } else {
3900             mesa_logw(
3901                 "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
3902                 "VkMemoryDedicatedAllocateInfo::image to create external resource.");
3903         }
3904     }
3905 
3906     if (importDmabuf) {
3907         VirtGpuExternalHandle importHandle = {};
3908         importHandle.osHandle = importFdInfoPtr->fd;
3909         importHandle.type = kMemHandleDmabuf;
3910 
3911         auto instance = VirtGpuDevice::getInstance();
3912         bufferBlob = instance->importBlob(importHandle);
3913         if (!bufferBlob) {
3914             mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
3915             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3916         }
3917     }
3918 
3919     if (bufferBlob) {
3920         if (hasDedicatedBuffer) {
3921             importBufferInfo.buffer = bufferBlob->getResourceHandle();
3922             vk_append_struct(&structChainIter, &importBufferInfo);
3923         } else {
3924             importCbInfo.colorBuffer = bufferBlob->getResourceHandle();
3925             vk_append_struct(&structChainIter, &importCbInfo);
3926         }
3927     }
3928 #endif
3929 
3930     if (ahw || bufferBlob || !requestedMemoryIsHostVisible) {
3931         input_result =
3932             enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3933 
3934         if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3935 
3937         setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
3938                             isImport, vmo_handle, bufferBlob);
3939 
3940         uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
3941 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3942         if (ahw) {
3943             memoryObjectId = getAHardwareBufferId(ahw);
3944         }
3945 #endif
3946         emitDeviceMemoryReport(info_VkDevice[device],
3947                                isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
3948                                         : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
3949                                memoryObjectId, pAllocateInfo->allocationSize,
3950                                VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
3951                                pAllocateInfo->memoryTypeIndex);
3952         return VK_SUCCESS;
3953     }
3954 
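    // Past this point only host-visible allocations remain: Fuchsia VMO-backed
    // memory is mapped directly below, and everything else is served from the
    // coherent-memory suballocator via getCoherentMemory().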
3955 #ifdef VK_USE_PLATFORM_FUCHSIA
3956     if (vmo_handle != ZX_HANDLE_INVALID) {
3957         input_result =
3958             enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
        if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3959 
3960         // Get VMO handle rights, and only use allowed rights to map the
3961         // host memory.
3962         zx_info_handle_basic handle_info;
3963         zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3964                                                 sizeof(handle_info), nullptr, nullptr);
3965         if (status != ZX_OK) {
3966             mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3967                       status);
3968             return VK_ERROR_OUT_OF_HOST_MEMORY;
3969         }
3970 
3971         zx_vm_option_t vm_permission = 0u;
3972         vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3973         vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
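        // Note: mapping permissions are derived strictly from the VMO's handle
        // rights, so e.g. a read-only VMO import yields a read-only mapping
        // instead of a map failure.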
3974 
3975         zx_vaddr_t addr;
3976         status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3977                              finalAllocInfo.allocationSize, &addr);
3978         if (status != ZX_OK) {
3979             mesa_loge("%s: cannot map vmar: status %d.", __func__, status);
3980             return VK_ERROR_OUT_OF_HOST_MEMORY;
3981         }
3982 
3983         setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
3984                             reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
3985                             /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
3986         return VK_SUCCESS;
3987     }
3988 #endif
3989 
3990     // Host visible memory with direct mapping
3991     VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
3992     if (result != VK_SUCCESS) return result;
3993 
3994     uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
3995 
3996 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3997     if (ahw) {
3998         memoryObjectId = getAHardwareBufferId(ahw);
3999     }
4000 #endif
4001 
4002     emitDeviceMemoryReport(info_VkDevice[device],
4003                            isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
4004                                     : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
4005                            memoryObjectId, pAllocateInfo->allocationSize,
4006                            VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
4007                            pAllocateInfo->memoryTypeIndex);
4008     return VK_SUCCESS;
4009 }
4010 
on_vkFreeMemory(void * context,VkDevice device,VkDeviceMemory memory,const VkAllocationCallbacks * pAllocator)4011 void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
4012                                       const VkAllocationCallbacks* pAllocator) {
4013     std::unique_lock<std::recursive_mutex> lock(mLock);
4014 
4015     auto it = info_VkDeviceMemory.find(memory);
4016     if (it == info_VkDeviceMemory.end()) return;
4017     auto& info = it->second;
4018     uint64_t memoryObjectId = (uint64_t)(void*)memory;
4019 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4020     if (info.ahw) {
4021         memoryObjectId = getAHardwareBufferId(info.ahw);
4022     }
4023 #endif
4024 
4025     emitDeviceMemoryReport(info_VkDevice[device],
4026                            info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
4027                                          : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
4028                            memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
4029                            (uint64_t)(void*)memory);
4030 
4031 #ifdef VK_USE_PLATFORM_FUCHSIA
4032     if (info.vmoHandle && info.ptr) {
4033         zx_status_t status = zx_vmar_unmap(
4034             zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
4035         if (status != ZX_OK) {
4036             mesa_loge("%s: Cannot unmap ptr: status %d", __func__, status);
4037         }
4038         info.ptr = nullptr;
4039     }
4040 #endif
4041 
4042     if (!info.coherentMemory) {
4043         lock.unlock();
4044         VkEncoder* enc = (VkEncoder*)context;
4045         enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
4046         return;
4047     }
4048 
4049     auto coherentMemory = freeCoherentMemoryLocked(memory, info);
4050 
4051     // We have to release the lock before we could possibly free a
4052     // CoherentMemory, because that will call into VkEncoder, which
4053     // shouldn't be called when the lock is held.
4054     lock.unlock();
4055     coherentMemory = nullptr;
4056 }
4057 
on_vkMapMemory(void * context,VkResult host_result,VkDevice device,VkDeviceMemory memory,VkDeviceSize offset,VkDeviceSize size,VkMemoryMapFlags,void ** ppData)4058 VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
4059                                          VkDeviceMemory memory, VkDeviceSize offset,
4060                                          VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
4061     if (host_result != VK_SUCCESS) {
4062         mesa_loge("%s: Host failed to map", __func__);
4063         return host_result;
4064     }
4065 
4066     std::unique_lock<std::recursive_mutex> lock(mLock);
4067 
4068     auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory);
4069     if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) {
4070         mesa_loge("%s: Failed to find VkDeviceMemory.", __func__);
4071         return VK_ERROR_MEMORY_MAP_FAILED;
4072     }
4073     auto& deviceMemoryInfo = deviceMemoryInfoIt->second;
4074 
4075     if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory &&
4076         !mCaps.params[kParamCreateGuestHandle]) {
4077         // NOTE: must not hold lock while calling into the encoder.
4078         lock.unlock();
4079         VkEncoder* enc = (VkEncoder*)context;
4080         VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
4081         if (vkResult != VK_SUCCESS) {
4082             mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__);
4083             return vkResult;
4084         }
4085         lock.lock();
4086 
4087         // NOTE: deviceMemoryInfoIt may have been invalidated while the lock was
        // dropped, but the deviceMemoryInfo reference remains valid; unordered_map
        // references survive rehashing.
4088 
4089         struct VirtGpuCreateBlob createBlob = {};
4090         createBlob.blobMem = kBlobMemHost3d;
4091         createBlob.flags = kBlobFlagMappable;
4092         createBlob.blobId = deviceMemoryInfo.blobId;
4093         createBlob.size = deviceMemoryInfo.coherentMemorySize;
4094 
4095         auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
4096         if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4097 
4098         VirtGpuResourceMappingPtr mapping = blob->createMapping();
4099         if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4100 
4101         auto coherentMemory =
4102             std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
4103 
4104         uint8_t* ptr;
4105         uint64_t coherentOffset;  // named to avoid shadowing the `offset` parameter
4106         coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, coherentOffset);
4107 
4108         deviceMemoryInfo.coherentMemoryOffset = coherentOffset;
4109         deviceMemoryInfo.coherentMemory = coherentMemory;
4110         deviceMemoryInfo.ptr = ptr;
4111     }
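    // Recap of the lazy-mapping path above (a sketch; names are from this file):
    //   vkGetBlobGOOGLE()  - asks the host to expose this allocation as a blob
    //   createBlob()       - imports that blob as a mappable virtgpu resource
    //   createMapping()    - maps the resource into the guest address space
    //   subAllocate()      - carves out this VkDeviceMemory's slice of it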
4112 
4113     if (!deviceMemoryInfo.ptr) {
4114         mesa_loge("%s: VkDeviceMemory has nullptr.", __func__);
4115         return VK_ERROR_MEMORY_MAP_FAILED;
4116     }
4117 
4118     if (size != VK_WHOLE_SIZE && (offset + size > deviceMemoryInfo.allocationSize)) {
4119         mesa_loge(
4120             "%s: requested range too big. alloc size 0x%llx, requested offset 0x%llx size "
4121             "0x%llx total 0x%llx",
4122             __func__, (unsigned long long)deviceMemoryInfo.allocationSize,
4123             (unsigned long long)offset, (unsigned long long)size,
4124             (unsigned long long)(offset + size));
4125         return VK_ERROR_MEMORY_MAP_FAILED;
4126     }
4127 
4128     *ppData = deviceMemoryInfo.ptr + offset;
4129 
4130     return host_result;
4131 }
4132 
on_vkUnmapMemory(void *,VkDevice,VkDeviceMemory)4133 void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
4134     // no-op
4135 }
4136 
transformImageMemoryRequirements2ForGuest(VkImage image,VkMemoryRequirements2 * reqs2)4137 void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
4138                                                                 VkMemoryRequirements2* reqs2) {
4139     std::lock_guard<std::recursive_mutex> lock(mLock);
4140 
4141     auto it = info_VkImage.find(image);
4142     if (it == info_VkImage.end()) return;
4143 
4144     auto& info = it->second;
4145 
4146     transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
4147 
4148     if (!info.external || !info.externalCreateInfo.handleTypes) return;
4152 
4153     VkMemoryDedicatedRequirements* dedicatedReqs =
4154         vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4155 
4156     if (!dedicatedReqs) return;
4157 
4158     transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4159 }
4160 
transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,VkMemoryRequirements2 * reqs2)4161 void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
4162                                                                  VkMemoryRequirements2* reqs2) {
4163     std::lock_guard<std::recursive_mutex> lock(mLock);
4164 
4165     auto it = info_VkBuffer.find(buffer);
4166     if (it == info_VkBuffer.end()) return;
4167 
4168     auto& info = it->second;
4169 
4170     if (!info.external || !info.externalCreateInfo.handleTypes) {
4171         return;
4172     }
4173 
4174     VkMemoryDedicatedRequirements* dedicatedReqs =
4175         vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4176 
4177     if (!dedicatedReqs) return;
4178 
4179     transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4180 }
4181 
on_vkCreateImage(void * context,VkResult,VkDevice device,const VkImageCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkImage * pImage)4182 VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
4183                                            const VkImageCreateInfo* pCreateInfo,
4184                                            const VkAllocationCallbacks* pAllocator,
4185                                            VkImage* pImage) {
4186     VkEncoder* enc = (VkEncoder*)context;
4187 
4188     VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4189     if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
4190         localCreateInfo.queueFamilyIndexCount = 0;
4191         localCreateInfo.pQueueFamilyIndices = nullptr;
4192     }
4193 
4194     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4195     VkExternalMemoryImageCreateInfo localExtImgCi;
4196 
4197     const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4198         vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);
4199 
4200     if (extImgCiPtr) {
4201         localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4202         vk_append_struct(&structChainIter, &localExtImgCi);
4203     }
4204 
4205 #if defined(LINUX_GUEST_BUILD)
4206     bool isDmaBufImage = false;
4207     VkImageDrmFormatModifierExplicitCreateInfoEXT localDrmFormatModifierInfo;
4208     VkImageDrmFormatModifierListCreateInfoEXT localDrmFormatModifierList;
4209 
4210     if (extImgCiPtr &&
4211         (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
4212         const wsi_image_create_info* wsiImageCi =
4213             vk_find_struct<wsi_image_create_info>(pCreateInfo);
4214         if (wsiImageCi && wsiImageCi->scanout) {
4215             // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
4216             // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
4217             // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
4218             // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
4219             // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
4220             localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
4221         }
4222 
4223         const VkImageDrmFormatModifierExplicitCreateInfoEXT* drmFmtMod =
4224             vk_find_struct<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo);
4225         const VkImageDrmFormatModifierListCreateInfoEXT* drmFmtModList =
4226             vk_find_struct<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo);
4227         if (drmFmtMod || drmFmtModList) {
4228             if (getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) !=
4229                 -1) {
4230                 // host supports DRM format modifiers => forward the struct
4231                 if (drmFmtMod) {
4232                     localDrmFormatModifierInfo = vk_make_orphan_copy(*drmFmtMod);
4233                     vk_append_struct(&structChainIter, &localDrmFormatModifierInfo);
4234                 }
4235                 if (drmFmtModList) {
4236                     localDrmFormatModifierList = vk_make_orphan_copy(*drmFmtModList);
4237                     vk_append_struct(&structChainIter, &localDrmFormatModifierList);
4238                 }
4239             } else {
4240                 bool canUseLinearModifier =
4241                     (drmFmtMod && drmFmtMod->drmFormatModifier == DRM_FORMAT_MOD_LINEAR) ||
4242                     // Guard against a null list: only the explicit-info struct may be present.
4243                     (drmFmtModList &&
4244                      std::any_of(drmFmtModList->pDrmFormatModifiers,
4245                                  drmFmtModList->pDrmFormatModifiers +
                                         drmFmtModList->drmFormatModifierCount,
                                     [](const uint64_t mod) { return mod == DRM_FORMAT_MOD_LINEAR; }));
4246                 // host doesn't support DRM format modifiers, try emulating
4247                 if (canUseLinearModifier) {
4248                     mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_LINEAR");
4249                     localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4250                 } else {
4251                     return VK_ERROR_VALIDATION_FAILED_EXT;
4252                 }
4253             }
4254         }
4255 
4256         isDmaBufImage = true;
4257     }
4258 #endif
4259 
4260 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4261     VkNativeBufferANDROID localAnb;
4262     const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
4263     if (anbInfoPtr) {
4264         localAnb = vk_make_orphan_copy(*anbInfoPtr);
4265         vk_append_struct(&structChainIter, &localAnb);
4266     }
4267 
4268     VkExternalFormatANDROID localExtFormatAndroid;
4269     const VkExternalFormatANDROID* extFormatAndroidPtr =
4270         vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4271     if (extFormatAndroidPtr) {
4272         localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4273 
4274         // Do not append external format android;
4275         // instead, replace the local image localCreateInfo format
4276         // with the corresponding Vulkan format
4277         if (extFormatAndroidPtr->externalFormat) {
4278             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4279             if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4280                 return VK_ERROR_VALIDATION_FAILED_EXT;
4281         }
4282     }
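    // Illustrative example (assumed mapping, not exhaustive): an externalFormat
    // of DRM_FORMAT_NV12 would be rewritten here to
    // VK_FORMAT_G8_B8R8_2PLANE_420_UNORM by vk_format_from_fourcc(), so the
    // host never sees the Android external format value itself.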
4283 #endif
4284 
4285 #ifdef VK_USE_PLATFORM_FUCHSIA
4286     const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4287         vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);
4288 
4289     bool isSysmemBackedMemory = false;
4290 
4291     if (extImgCiPtr &&
4292         (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4293         isSysmemBackedMemory = true;
4294     }
4295 
4296     if (extBufferCollectionPtr) {
4297         const auto& collection =
4298             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4299                 extBufferCollectionPtr->collection);
4300         uint32_t index = extBufferCollectionPtr->index;
4301         zx::vmo vmo;
4302 
4303         fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4304 
4305         auto result = collection->WaitForBuffersAllocated();
4306         if (result.ok() && result->status == ZX_OK) {
4307             info = std::move(result->buffer_collection_info);
4308             if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4309                 vmo = std::move(info.buffers[index].vmo);
4310             }
4311         } else {
4312             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
4313                       GET_STATUS_SAFE(result, status));
4314         }
4315 
4316         if (vmo.is_valid()) {
4317             zx::vmo vmo_dup;
4318             if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4319                 status != ZX_OK) {
4320                 mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
4321                 abort();
4322             }
4323 
4324             auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4325             if (!buffer_handle_result.ok()) {
4326                 mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
4327                           buffer_handle_result.status());
4328                 abort();
4329             }
4330             if (buffer_handle_result.value().res == ZX_OK) {
4331                 // Buffer handle already exists.
4332                 // If it is a ColorBuffer, this is a no-op; otherwise return an error.
4333                 if (buffer_handle_result.value().type !=
4334                     fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4335                     mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
4336                               buffer_handle_result.value().id);
4337                     return VK_ERROR_OUT_OF_HOST_MEMORY;
4338                 }
4339             } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4340                 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4341                 auto format = info.settings.image_format_constraints.pixel_format.type ==
4342                                       fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4343                                   ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4344                                   : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4345 
4346                 uint32_t memory_property =
4347                     info.settings.buffer_settings.heap ==
4348                             fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4349                         ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4350                         : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4351 
4352                 fidl::Arena arena;
4353                 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
4354                 createParams.set_width(info.settings.image_format_constraints.min_coded_width)
4355                     .set_height(info.settings.image_format_constraints.min_coded_height)
4356                     .set_format(format)
4357                     .set_memory_property(memory_property);
4358 
4359                 auto result =
4360                     mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4361                 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
4362                     mesa_logd("CreateColorBuffer: color buffer already exists\n");
4363                 } else if (!result.ok() || result->res != ZX_OK) {
4364                     mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
4365                               GET_STATUS_SAFE(result, res));
4366                 }
4367             }
4368 
4369             if (info.settings.buffer_settings.heap ==
4370                 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4371                 mesa_logd(
4372                     "%s: Image uses host visible memory heap; set tiling "
4373                     "to linear to match host ImageCreateInfo",
4374                     __func__);
4375                 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4376             }
4377         }
4378         isSysmemBackedMemory = true;
4379     }
4380 
4381     if (isSysmemBackedMemory) {
4382         localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4383     }
4384 #endif
4385 
4386     VkResult res;
4387     VkMemoryRequirements memReqs;
4388 
4389     if (supportsCreateResourcesWithRequirements()) {
4390         res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
4391                                                        &memReqs, true /* do lock */);
4392     } else {
4393         res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4394     }
4395 
4396     if (res != VK_SUCCESS) return res;
4397 
4398     std::lock_guard<std::recursive_mutex> lock(mLock);
4399 
4400     auto it = info_VkImage.find(*pImage);
4401     if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4402 
4403     auto& info = it->second;
4404 
4405     info.device = device;
4406     info.createInfo = *pCreateInfo;
4407     info.createInfo.pNext = nullptr;
4408 
4409 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4410     if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4411         info.hasExternalFormat = true;
4412         info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
4413     }
4414 #endif  // VK_USE_PLATFORM_ANDROID_KHR
4415 
4416     if (supportsCreateResourcesWithRequirements()) {
4417         info.baseRequirementsKnown = true;
4418     }
4419 
4420     if (extImgCiPtr) {
4421         info.external = true;
4422         info.externalCreateInfo = *extImgCiPtr;
4423     }
4424 
4425 #ifdef VK_USE_PLATFORM_FUCHSIA
4426     if (isSysmemBackedMemory) {
4427         info.isSysmemBackedMemory = true;
4428     }
4429 #endif
4430 
4431 // Delete the `protocolVersion` check once the goldfish drivers are gone.
4432 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4433     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4434         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4435     }
4436     if ((extImgCiPtr && (extImgCiPtr->handleTypes &
4437                          VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
4438         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4439     }
4440 #endif
4441 #if defined(LINUX_GUEST_BUILD)
4442     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4443         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4444     }
4445     info.isDmaBufImage = isDmaBufImage;
4446     if (info.isDmaBufImage) {
4447         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4448         if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
4449             // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
4450             // support. Similarly, stride is also queried from vkGetImageSubresourceLayout() to
4451             // determine the stride for colorBuffer resource creation (guest-side dmabuf resource).
4452             // To satisfy valid usage of this API, must call on the linearPeerImage for the VkImage
4453             // in question. As long as these two use cases match, the rowPitch won't actually be
4454             // used by WSI.
4455             VkImageCreateInfo linearPeerImageCreateInfo = {
4456                 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4457                 .pNext = nullptr,
4458                 .flags = {},
4459                 .imageType = VK_IMAGE_TYPE_2D,
4460                 .format = localCreateInfo.format,
4461                 .extent = localCreateInfo.extent,
4462                 .mipLevels = 1,
4463                 .arrayLayers = 1,
4464                 .samples = VK_SAMPLE_COUNT_1_BIT,
4465                 .tiling = VK_IMAGE_TILING_LINEAR,
4466                 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
4467                 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
4468                 .queueFamilyIndexCount = 0,
4469                 .pQueueFamilyIndices = nullptr,
4470                 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
4471             };
4472             res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
4473                                      &info.linearPeerImage, true /* do lock */);
4474             if (res != VK_SUCCESS) return res;
4475         }
4476     }
4477 #endif
4478 
4479     if (info.baseRequirementsKnown) {
4480         transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4481         info.baseRequirements = memReqs;
4482     }
4483     return res;
4484 }
4485 
on_vkCreateSamplerYcbcrConversion(void * context,VkResult,VkDevice device,const VkSamplerYcbcrConversionCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSamplerYcbcrConversion * pYcbcrConversion)4486 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
4487     void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4488     const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4489     VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4490 
4491 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4492     const VkExternalFormatANDROID* extFormatAndroidPtr =
4493         vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4494     if (extFormatAndroidPtr) {
4495         if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4496             // External formats are not supported on the host, which causes RGB565
4497             // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4498             // when passed as an external format.
4499             // We may consider doing this for all external formats.
4500             // See b/134771579.
4501             *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4502             return VK_SUCCESS;
4503         } else if (extFormatAndroidPtr->externalFormat) {
4504             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4505         }
4506     }
4507 #endif
4508 
4509     VkEncoder* enc = (VkEncoder*)context;
4510     VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
4511                                                        pYcbcrConversion, true /* do lock */);
4512 
4513     if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4514         mesa_loge(
4515             "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
4516             "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4517         abort();
4518     }
4519     return res;
4520 }
4521 
on_vkDestroySamplerYcbcrConversion(void * context,VkDevice device,VkSamplerYcbcrConversion ycbcrConversion,const VkAllocationCallbacks * pAllocator)4522 void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
4523                                                          VkSamplerYcbcrConversion ycbcrConversion,
4524                                                          const VkAllocationCallbacks* pAllocator) {
4525     VkEncoder* enc = (VkEncoder*)context;
4526     if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4527         enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
4528                                              true /* do lock */);
4529     }
4530 }
4531 
on_vkCreateSamplerYcbcrConversionKHR(void * context,VkResult,VkDevice device,const VkSamplerYcbcrConversionCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSamplerYcbcrConversion * pYcbcrConversion)4532 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
4533     void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4534     const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4535     VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4536 
4537 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4538     const VkExternalFormatANDROID* extFormatAndroidPtr =
4539         vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4540     if (extFormatAndroidPtr) {
4541         if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4542             // External formats are not supported on the host, which causes RGB565
4543             // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4544             // when passed as an external format.
4545             // We may consider doing this for all external formats.
4546             // See b/134771579.
4547             *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4548             return VK_SUCCESS;
4549         } else if (extFormatAndroidPtr->externalFormat) {
4550             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4551         }
4552     }
4553 #endif
4554 
4555     VkEncoder* enc = (VkEncoder*)context;
4556     VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
4557                                                           pYcbcrConversion, true /* do lock */);
4558 
4559     if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4560         mesa_loge(
4561             "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
4562             "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4563         abort();
4564     }
4565     return res;
4566 }
4567 
on_vkDestroySamplerYcbcrConversionKHR(void * context,VkDevice device,VkSamplerYcbcrConversion ycbcrConversion,const VkAllocationCallbacks * pAllocator)4568 void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
4569     void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
4570     const VkAllocationCallbacks* pAllocator) {
4571     VkEncoder* enc = (VkEncoder*)context;
4572     if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4573         enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
4574                                                 true /* do lock */);
4575     }
4576 }
4577 
on_vkCreateSampler(void * context,VkResult,VkDevice device,const VkSamplerCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSampler * pSampler)4578 VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
4579                                              const VkSamplerCreateInfo* pCreateInfo,
4580                                              const VkAllocationCallbacks* pAllocator,
4581                                              VkSampler* pSampler) {
4582     VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4583     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4584 
4585 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4586     VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4587     const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4588         vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
4589     if (samplerYcbcrConversionInfo) {
4590         if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4591             localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4592             vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4593         }
4594     }
4595 
4596     VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4597     const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4598         vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo);
4599     if (samplerCustomBorderColorCreateInfo) {
4600         localVkSamplerCustomBorderColorCreateInfo =
4601             vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4602         vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4603     }
4604 #endif
4605 
4606     VkEncoder* enc = (VkEncoder*)context;
4607     return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4608 }
4609 
4610 void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
4611     void* context, VkPhysicalDevice physicalDevice,
4612     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4613     VkExternalFenceProperties* pExternalFenceProperties) {
4614     (void)context;
4615     (void)physicalDevice;
4616 
4617     pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4618     pExternalFenceProperties->compatibleHandleTypes = 0;
4619     pExternalFenceProperties->externalFenceFeatures = 0;
4620 
4621     bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4622 
4623     if (!syncFd) {
4624         return;
4625     }
4626 
4627 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4628     pExternalFenceProperties->exportFromImportedHandleTypes =
4629         VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4630     pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4631     pExternalFenceProperties->externalFenceFeatures =
4632         VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4633 #endif
4634 }
4635 
4636 void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
4637     void* context, VkPhysicalDevice physicalDevice,
4638     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4639     VkExternalFenceProperties* pExternalFenceProperties) {
4640     on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
4641                                                   pExternalFenceProperties);
4642 }
4643 
4644 VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
4645                                            const VkFenceCreateInfo* pCreateInfo,
4646                                            const VkAllocationCallbacks* pAllocator,
4647                                            VkFence* pFence) {
4648     VkEncoder* enc = (VkEncoder*)context;
4649     VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4650 
4651     const VkExportFenceCreateInfo* exportFenceInfoPtr =
4652         vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);
4653 
4654 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4655     bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
4656                                                VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4657 #endif
4658 
4659     input_result =
4660         enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4661 
4662     if (input_result != VK_SUCCESS) return input_result;
4663 
4664 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4665     if (exportSyncFd) {
4666         if (!mFeatureInfo.hasVirtioGpuNativeSync) {
4667             mesa_logd("%s: ensure sync device\n", __func__);
4668             ensureSyncDeviceFd();
4669         }
4670 
4671         mesa_logd("%s: getting fence info\n", __func__);
4672         std::lock_guard<std::recursive_mutex> lock(mLock);
4673         auto it = info_VkFence.find(*pFence);
4674 
4675         if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;
4676 
4677         auto& info = it->second;
4678 
4679         info.external = true;
4680         info.exportFenceCreateInfo = *exportFenceInfoPtr;
4681         mesa_logd("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4682         // syncFd stays -1 because the user is expected to explicitly
4683         // export it via vkGetFenceFdKHR
4684     }
4685 #endif
4686 
4687     return input_result;
4688 }
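// For reference, a minimal sketch of the application-side flow that the code
// above enables (hypothetical caller code, not part of this file):
//
//     VkExportFenceCreateInfo exportInfo = {
//         .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
//         .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT};
//     VkFenceCreateInfo fenceCi = {
//         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = &exportInfo};
//     vkCreateFence(device, &fenceCi, nullptr, &fence);  // syncFd stays -1
//     vkQueueSubmit(queue, 1, &submitInfo, fence);
//     VkFenceGetFdInfoKHR getFdInfo = {
//         .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
//         .fence = fence,
//         .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT};
//     int fd = -1;
//     vkGetFenceFdKHR(device, &getFdInfo, &fd);  // materializes the sync fd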
4689 
4690 void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
4691                                         const VkAllocationCallbacks* pAllocator) {
4692     VkEncoder* enc = (VkEncoder*)context;
4693     enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4694 }
4695 
4696 VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
4697                                            uint32_t fenceCount, const VkFence* pFences) {
4698     VkEncoder* enc = (VkEncoder*)context;
4699     VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4700 
4701     if (res != VK_SUCCESS) return res;
4702 
4703     if (!fenceCount) return res;
4704 
4705     // Permanence: temporary.
4706     // On fence reset, close the fence fd and act as if
4707     // vkGetFenceFdKHR/vkImportFenceFdKHR needs to happen again.
4708     std::lock_guard<std::recursive_mutex> lock(mLock);
4709     for (uint32_t i = 0; i < fenceCount; ++i) {
4710         VkFence fence = pFences[i];
4711         auto it = info_VkFence.find(fence);
             if (it == info_VkFence.end()) continue;
4712         auto& info = it->second;
4713         if (!info.external) continue;
4714 
4715 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4716         if (info.syncFd && *info.syncFd >= 0) {
4717             mesa_logd("%s: resetting fence. make fd -1\n", __func__);
4718             goldfish_sync_signal(*info.syncFd);
4719             mSyncHelper->close(*info.syncFd);
4720         }
4721         info.syncFd.reset();
4722 #endif
4723     }
4724 
4725     return res;
4726 }
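// External fence lifecycle, as implemented above and below: create (syncFd
// unset) -> vkGetFenceFdKHR materializes an fd or vkImportFenceFdKHR adopts
// one -> vkResetFences signals and closes any guest-held fd, so the
// export/import step has to happen again.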
4727 
4728 VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
4729                                                 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4730     (void)context;
4731     (void)device;
4732     (void)pImportFenceFdInfo;
4733 
4734     // Transference: copy
4735     // meaning dup() the incoming fd
4736 
4739     bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4740 
4741     if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4742 
4743 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4744 
4745     bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4746 
4747     if (!syncFdImport) {
4748         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4749         return VK_ERROR_OUT_OF_HOST_MEMORY;
4750     }
4751 
4752     std::lock_guard<std::recursive_mutex> lock(mLock);
4753     auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4754     if (it == info_VkFence.end()) {
4755         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4756         return VK_ERROR_OUT_OF_HOST_MEMORY;
4757     }
4758 
4759     auto& info = it->second;
4760 
4761 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4762     if (info.syncFd && *info.syncFd >= 0) {
4763         mesa_logd("%s: previous sync fd exists, close it\n", __func__);
4764         goldfish_sync_signal(*info.syncFd);
4765         mSyncHelper->close(*info.syncFd);
4766     }
4767 #endif
4768 
4769     if (pImportFenceFdInfo->fd < 0) {
4770         mesa_logd("%s: import -1, set to -1 and exit\n", __func__);
4771         info.syncFd = -1;
4772     } else {
4773         mesa_logd("%s: import actual fd, dup and close()\n", __func__);
4774 
4775         int fenceCopy = mSyncHelper->dup(pImportFenceFdInfo->fd);
4776         if (fenceCopy < 0) {
4777             mesa_loge("Failed to dup() import sync fd.");
4778             return VK_ERROR_OUT_OF_HOST_MEMORY;
4779         }
4780 
4781         info.syncFd = fenceCopy;
4782 
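        // Per VK_KHR_external_fence_fd, a successful sync fd import transfers
        // ownership of the fd to the implementation, so release the caller's
        // copy now that we hold our own dup().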
4783         mSyncHelper->close(pImportFenceFdInfo->fd);
4784     }
4785     return VK_SUCCESS;
4786 #else
4787     return VK_ERROR_OUT_OF_HOST_MEMORY;
4788 #endif
4789 }
4790 
4791 VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
4792                                              const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
4793     // Export operation.
4794     // First check the fence status; on device loss, return -1.
4795     // Otherwise (signaled or not), create and return a sync fd, since
4796     // -1 may only signal an error to callers (see the ANGLE note below).
4797 
4798     VkEncoder* enc = (VkEncoder*)context;
4799 
4800     bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4801 
4802     if (!hasFence) {
4803         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4804         return VK_ERROR_OUT_OF_HOST_MEMORY;
4805     }
4806 
4807 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4808     bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4809 
4810     if (!syncFdExport) {
4811         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4812         return VK_ERROR_OUT_OF_HOST_MEMORY;
4813     }
4814 
4815     VkResult currentFenceStatus =
4816         enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4817 
4818     if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Unrecoverable error
4819         mesa_loge("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4820         *pFd = -1;
4821         return VK_ERROR_DEVICE_LOST;
4822     }
4823 
4824     if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
4825         // Fence is valid. We also create a new sync fd for a signaled
4826         // fence, because ANGLE will use the returned fd directly to
4827         // implement eglDupNativeFenceFDANDROID, where -1 is only returned
4828         // when error occurs.
4829         std::lock_guard<std::recursive_mutex> lock(mLock);
4830 
4831         auto it = info_VkFence.find(pGetFdInfo->fence);
4832         if (it == info_VkFence.end()) {
4833             mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4834             return VK_ERROR_OUT_OF_HOST_MEMORY;
4835         }
4836 
4837         auto& info = it->second;
4838 
4839         bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
4840                                                VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4841 
4842         if (!syncFdCreated) {
4843             mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4844             return VK_ERROR_OUT_OF_HOST_MEMORY;
4845         }
4846 
4847         if (mFeatureInfo.hasVirtioGpuNativeSync) {
4848             VkResult result;
4849             int64_t osHandle;
4850             uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4851 
4852             result = createFence(device, hostFenceHandle, osHandle);
4853             if (result != VK_SUCCESS) return result;
4854 
4855             *pFd = osHandle;
4856         } else {
4857 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4858             goldfish_sync_queue_work(
4859                 mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4860                 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4861                 pFd);
4862 #endif
4863         }
4864 
4865         // relinquish ownership
4866         info.syncFd.reset();
4867 
4868         mesa_logd("%s: got fd: %d\n", __func__, *pFd);
4869         return VK_SUCCESS;
4870     }
4871     return VK_ERROR_DEVICE_LOST;
4872 #else
4873     return VK_ERROR_OUT_OF_HOST_MEMORY;
4874 #endif
4875 }
4876 
4877 VkResult ResourceTracker::on_vkGetFenceStatus(void* context, VkResult input_result, VkDevice device,
4878                                               VkFence fence) {
4879     VkEncoder* enc = (VkEncoder*)context;
4880 
4881 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4882     {
4883         std::unique_lock<std::recursive_mutex> lock(mLock);
4884 
4885         auto fenceInfoIt = info_VkFence.find(fence);
4886         if (fenceInfoIt == info_VkFence.end()) {
4887             mesa_loge("Failed to find VkFence:%p", fence);
4888             return VK_NOT_READY;
4889         }
4890         auto& fenceInfo = fenceInfoIt->second;
4891 
4892         if (fenceInfo.syncFd) {
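            // A sync fd of -1 denotes an already-signaled payload (see the
            // import of -1 in on_vkImportFenceFdKHR), so report success
            // without consulting the host.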
4893             if (*fenceInfo.syncFd == -1) {
4894                 return VK_SUCCESS;
4895             }
4896 
4897             bool syncFdSignaled = mSyncHelper->wait(*fenceInfo.syncFd, /*timeout=*/0) == 0;
4898             return syncFdSignaled ? VK_SUCCESS : VK_NOT_READY;
4899         }
4900     }
4901 #endif
4902 
4903     return enc->vkGetFenceStatus(device, fence, /*doLock=*/true);
4904 }
4905 
4906 VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
4907                                              uint32_t fenceCount, const VkFence* pFences,
4908                                              VkBool32 waitAll, uint64_t timeout) {
4909     VkEncoder* enc = (VkEncoder*)context;
4910 
4911 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4912     std::vector<int> fencesExternalSyncFds;
4913     std::vector<VkFence> fencesNonExternal;
4914 
4915     std::unique_lock<std::recursive_mutex> lock(mLock);
4916 
4917     for (uint32_t i = 0; i < fenceCount; ++i) {
4918         auto it = info_VkFence.find(pFences[i]);
4919         if (it == info_VkFence.end()) continue;
4920         const auto& info = it->second;
4921         if (info.syncFd) {
4922             if (*info.syncFd >= 0) {
4923                 fencesExternalSyncFds.push_back(*info.syncFd);
4924             }
4925         } else {
4926             fencesNonExternal.push_back(pFences[i]);
4927         }
4928     }
4929 
4930     lock.unlock();
4931 
4932     for (auto fd : fencesExternalSyncFds) {
4933         mesa_logd("Waiting on sync fd: %d", fd);
4934 
4935         std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
4936         // syncHelper works in milliseconds; timeout is in nanoseconds
4937         mSyncHelper->wait(fd, DIV_ROUND_UP(timeout, 1000000));
4938         std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
4939 
4940         uint64_t timeTaken =
4941             std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
4942         if (timeTaken >= timeout) {
4943             return VK_TIMEOUT;
4944         }
4945 
4946         timeout -= timeTaken;
4947         mesa_logd("Done waiting on sync fd: %d", fd);
4948 
4949 #if GFXSTREAM_SYNC_DEBUG
4950         mSyncHelper->debugPrint(fd);
4951 #endif
4952     }
4953 
4954     if (!fencesNonExternal.empty()) {
4955         auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4956         auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4957         mesa_logd("vkWaitForFences to host");
4958         return vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
4959                                           fencesNonExternal.data(), waitAll, timeout,
4960                                           true /* do lock */);
4961     }
4962 
4963     return VK_SUCCESS;
4964 
4965 #else
4966     return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4967 #endif
4968 }
4969 
4970 VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4971                                                     const VkDescriptorPoolCreateInfo* pCreateInfo,
4972                                                     const VkAllocationCallbacks* pAllocator,
4973                                                     VkDescriptorPool* pDescriptorPool) {
4974     VkEncoder* enc = (VkEncoder*)context;
4975 
4976     VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4977                                                true /* do lock */);
4978 
4979     if (res != VK_SUCCESS) return res;
4980 
4981     VkDescriptorPool pool = *pDescriptorPool;
4982 
4983     struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4984     dp->allocInfo = new DescriptorPoolAllocationInfo;
4985     dp->allocInfo->device = device;
4986     dp->allocInfo->createFlags = pCreateInfo->flags;
4987     dp->allocInfo->maxSets = pCreateInfo->maxSets;
4988     dp->allocInfo->usedSets = 0;
4989 
4990     for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4991         dp->allocInfo->descriptorCountInfo.push_back({
4992             pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
4993             0, /* used */
4994         });
4995     }
4996 
4997     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
4998         std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
4999 
5000         uint32_t count = pCreateInfo->maxSets;
5001         enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
5002                                               true /* do lock */);
5003 
5004         dp->allocInfo->freePoolIds = poolIds;
5005     }
5006 
5007     return res;
5008 }
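// The pool IDs stashed in freePoolIds above are consumed by
// on_vkAllocateDescriptorSets (via validateAndApplyVirtualDescriptorSetAllocation)
// when batched descriptor set updates are enabled, presumably so the guest can
// hand out host-known set handles without a host round trip per allocation.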
5009 
5010 void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
5011                                                  VkDescriptorPool descriptorPool,
5012                                                  const VkAllocationCallbacks* pAllocator) {
5013     if (!descriptorPool) return;
5014 
5015     VkEncoder* enc = (VkEncoder*)context;
5016 
5017     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5018 
5019     enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
5020 }
5021 
5022 VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
5023                                                    VkDescriptorPool descriptorPool,
5024                                                    VkDescriptorPoolResetFlags flags) {
5025     if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
5026 
5027     VkEncoder* enc = (VkEncoder*)context;
5028 
5029     VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
5030 
5031     if (res != VK_SUCCESS) return res;
5032 
5033     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5034     return res;
5035 }
5036 
5037 VkResult ResourceTracker::on_vkAllocateDescriptorSets(
5038     void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
5039     VkDescriptorSet* pDescriptorSets) {
5040     VkEncoder* enc = (VkEncoder*)context;
5041     auto ci = pAllocateInfo;
5042     auto sets = pDescriptorSets;
5043     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5044         // Using the pool ID's we collected earlier from the host
5045         VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
5046 
5047         if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
5048 
5049         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5050             register_VkDescriptorSet(sets[i]);
5051             VkDescriptorSetLayout setLayout =
5052                 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
5053 
5054             // Need to add ref to the set layout in the virtual case
5055             // because the set itself might not be realized on host at the
5056             // same time
5057             struct goldfish_VkDescriptorSetLayout* dsl =
5058                 as_goldfish_VkDescriptorSetLayout(setLayout);
5059             ++dsl->layoutInfo->refcount;
5060         }
5061     } else {
5062         VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
5063 
5064         if (allocRes != VK_SUCCESS) return allocRes;
5065 
5066         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5067             applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
5068             fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
5069         }
5070     }
5071 
5072     return VK_SUCCESS;
5073 }
5074 
5075 VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
5076                                                   VkDescriptorPool descriptorPool,
5077                                                   uint32_t descriptorSetCount,
5078                                                   const VkDescriptorSet* pDescriptorSets) {
5079     VkEncoder* enc = (VkEncoder*)context;
5080 
5081     // A bit of robustness so that double-freeing descriptor sets
5082     // and other invalid usage is tolerated.
5083     // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
5084     // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
5085     std::vector<VkDescriptorSet> toActuallyFree;
5086     {
5087         std::lock_guard<std::recursive_mutex> lock(mLock);
5088 
5089         // Pool was destroyed
5090         if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
5091             return VK_SUCCESS;
5092         }
5093 
5094         if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
5095 
5096         std::vector<VkDescriptorSet> existingDescriptorSets;
5097         ;
5098 
5099         // Check if this descriptor set was in the pool's set of allocated descriptor sets,
5100         // to guard against double free (the client is allowed to double-free).
5101         {
5102             auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
5103 
5104             for (uint32_t i = 0; i < descriptorSetCount; ++i) {
5105                 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
5106                     mesa_loge(
5107                         "%s: Warning: descriptor set %p not found in pool. Was this "
5108                         "double-freed?\n",
5109                         __func__, (void*)pDescriptorSets[i]);
5110                     continue;
5111                 }
5112 
5113                 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
5114                 if (it == info_VkDescriptorSet.end()) continue;
5115 
5116                 existingDescriptorSets.push_back(pDescriptorSets[i]);
5117             }
5118         }
5119 
5120         for (auto set : existingDescriptorSets) {
5121             if (removeDescriptorSetFromPool(set,
5122                                             mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)) {
5123                 toActuallyFree.push_back(set);
5124             }
5125         }
5126 
5127         if (toActuallyFree.empty()) return VK_SUCCESS;
5128     }
5129 
5130     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5131         // In the batched set update case, decrement refcount on the set layout
5132         // and only free on host if we satisfied a pending allocation on the
5133         // host.
5134         for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
5135             VkDescriptorSetLayout setLayout =
5136                 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
5137             decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
5138         }
5139         freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
5140                                           toActuallyFree.data());
5141     } else {
5142         // In the non-batched set update case, just free them directly.
5143         enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
5144                                   toActuallyFree.data(), true /* do lock */);
5145     }
5146     return VK_SUCCESS;
5147 }
5148 
5149 VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
5150     void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
5151     const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
5152     VkEncoder* enc = (VkEncoder*)context;
5153 
5154     VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
5155                                                     true /* do lock */);
5156 
5157     if (res != VK_SUCCESS) return res;
5158 
5159     struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
5160     dsl->layoutInfo = new DescriptorSetLayoutInfo;
5161     for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
5162         dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
5163     }
5164     dsl->layoutInfo->refcount = 1;
5165 
5166     return res;
5167 }
5168 
5169 void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
5170                                                 uint32_t descriptorWriteCount,
5171                                                 const VkWriteDescriptorSet* pDescriptorWrites,
5172                                                 uint32_t descriptorCopyCount,
5173                                                 const VkCopyDescriptorSet* pDescriptorCopies) {
5174     VkEncoder* enc = (VkEncoder*)context;
5175 
5176     std::vector<VkDescriptorImageInfo> transformedImageInfos;
5177     std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
5178 
5179     memcpy(transformedWrites.data(), pDescriptorWrites,
5180            sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
5181 
5182     size_t imageInfosNeeded = 0;
5183     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5184         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5185         if (!transformedWrites[i].pImageInfo) continue;
5186 
5187         imageInfosNeeded += transformedWrites[i].descriptorCount;
5188     }
5189 
5190     transformedImageInfos.resize(imageInfosNeeded);
5191 
5192     size_t imageInfoIndex = 0;
5193     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5194         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5195         if (!transformedWrites[i].pImageInfo) continue;
5196 
5197         for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5198             transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
5199             ++imageInfoIndex;
5200         }
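        // Re-point pImageInfo at the contiguous copies made above so that the
        // sampler filtering below can mutate them without touching the
        // caller's array.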
5201         transformedWrites[i].pImageInfo =
5202             &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
5203     }
5204 
5205     {
5206         // Validate and filter samplers
5207         std::lock_guard<std::recursive_mutex> lock(mLock);
5208         size_t imageInfoIndex = 0;
5209         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5210             if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5211             if (!transformedWrites[i].pImageInfo) continue;
5212 
5213             bool isImmutableSampler = descriptorBindingIsImmutableSampler(
5214                 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
5215 
5216             for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5217                 if (isImmutableSampler) {
5218                     transformedImageInfos[imageInfoIndex].sampler = 0;
5219                 }
5220                 transformedImageInfos[imageInfoIndex] =
5221                     filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
5222                 ++imageInfoIndex;
5223             }
5224         }
5225     }
5226 
5227     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5228         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5229             VkDescriptorSet set = transformedWrites[i].dstSet;
5230             doEmulatedDescriptorWrite(&transformedWrites[i],
5231                                       as_goldfish_VkDescriptorSet(set)->reified);
5232         }
5233 
5234         for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5235             doEmulatedDescriptorCopy(
5236                 &pDescriptorCopies[i],
5237                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5238                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5239         }
5240     } else {
5241         enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5242                                     descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5243     }
5244 }
5245 
5246 void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5247                                         const VkAllocationCallbacks* pAllocator) {
5248 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5249     {
5250         std::lock_guard<std::recursive_mutex> lock(mLock);  // do not guard the encoder;
5251                                                             // doing so may deadlock, b/243339973
5252 
5253         // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5254         // potentially processing the below `vkDestroyImage()` from the VK encoder
5255         // command stream before processing a previously submitted
5256         // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5257         // stream which relies on the image existing.
5258         auto imageInfoIt = info_VkImage.find(image);
5259         if (imageInfoIt != info_VkImage.end()) {
5260             auto& imageInfo = imageInfoIt->second;
5261             for (int syncFd : imageInfo.pendingQsriSyncFds) {
5262                 int syncWaitRet = mSyncHelper->wait(syncFd, 3000);
5263                 if (syncWaitRet < 0) {
5264                     mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
5265                               __func__, strerror(errno), errno);
5266                 }
5267 
5268 #if GFXSTREAM_SYNC_DEBUG
5269                 mSyncHelper->debugPrint(syncFd);
5270 #endif
5271                 mSyncHelper->close(syncFd);
5272             }
5273             imageInfo.pendingQsriSyncFds.clear();
5274         }
5275     }
5276 #endif
5277     VkEncoder* enc = (VkEncoder*)context;
5278 #if defined(LINUX_GUEST_BUILD)
5279     auto imageInfoIt = info_VkImage.find(image);
5280     if (imageInfoIt != info_VkImage.end()) {
5281         auto& imageInfo = imageInfoIt->second;
5282         if (imageInfo.linearPeerImage) {
5283             enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
5284         }
5285     }
5286 #endif
5287     enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5288 }
5289 
5290 void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
5291                                                       VkMemoryRequirements* pMemoryRequirements) {
5292     std::unique_lock<std::recursive_mutex> lock(mLock);
5293 
5294     auto it = info_VkImage.find(image);
5295     if (it == info_VkImage.end()) return;
5296 
5297     auto& info = it->second;
5298 
5299     if (info.baseRequirementsKnown) {
5300         *pMemoryRequirements = info.baseRequirements;
5301         return;
5302     }
5303 
5304     lock.unlock();
5305 
5306     VkEncoder* enc = (VkEncoder*)context;
5307 
5308     enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);
5309 
5310     lock.lock();
5311 
5312     transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);
5313 
5314     info.baseRequirementsKnown = true;
5315     info.baseRequirements = *pMemoryRequirements;
5316 }
5317 
5318 void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5319                                                        const VkImageMemoryRequirementsInfo2* pInfo,
5320                                                        VkMemoryRequirements2* pMemoryRequirements) {
5321     VkEncoder* enc = (VkEncoder*)context;
5322     enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5323     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5324 }
5325 
5326 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5327     void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5328     VkMemoryRequirements2* pMemoryRequirements) {
5329     VkEncoder* enc = (VkEncoder*)context;
5330     enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5331     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5332 }
5333 
5334 void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image,
5335                                                      const VkImageSubresource* pSubresource,
5336                                                      VkSubresourceLayout* pLayout) {
5337     VkEncoder* enc = (VkEncoder*)context;
5338     VkImage targetImage = image;
5339 #if defined(LINUX_GUEST_BUILD)
5340     auto it = info_VkImage.find(image);
5341     if (it == info_VkImage.end()) return;
5342     const auto& info = it->second;
5343     if (info.linearPeerImage) {
5344         targetImage = info.linearPeerImage;
5345     }
5346 #endif
5347     enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout,
5348                                      true /* do lock */);
5349 }
5350 
5351 VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5352                                                VkImage image, VkDeviceMemory memory,
5353                                                VkDeviceSize memoryOffset) {
5354     VkEncoder* enc = (VkEncoder*)context;
5355     // Do not forward calls with invalid handles to host.
5356     if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5357         info_VkImage.find(image) == info_VkImage.end()) {
5358         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5359     }
5360     return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5361 }
5362 
5363 VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5364                                                 uint32_t bindingCount,
5365                                                 const VkBindImageMemoryInfo* pBindInfos) {
5366     VkEncoder* enc = (VkEncoder*)context;
5367 
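    // As in on_vkBindImageMemory, do not forward calls with invalid handles
    // to the host; validate every bind entry first.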
5368     if (bindingCount < 1 || !pBindInfos) {
5369         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5370     }
5371 
5372     for (uint32_t i = 0; i < bindingCount; i++) {
5373         const VkBindImageMemoryInfo& bimi = pBindInfos[i];
5374 
5375         auto imageIt = info_VkImage.find(bimi.image);
5376         if (imageIt == info_VkImage.end()) {
5377             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5378         }
5379 
5380         if (bimi.memory != VK_NULL_HANDLE) {
5381             auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5382             if (memoryIt == info_VkDeviceMemory.end()) {
5383                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5384             }
5385         }
5386     }
5387 
5388     return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5389 }
5390 
5391 VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5392                                                    uint32_t bindingCount,
5393                                                    const VkBindImageMemoryInfo* pBindInfos) {
5394     return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5395 }
5396 
5397 VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5398                                             const VkBufferCreateInfo* pCreateInfo,
5399                                             const VkAllocationCallbacks* pAllocator,
5400                                             VkBuffer* pBuffer) {
5401     VkEncoder* enc = (VkEncoder*)context;
5402 
5403     VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5404     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5405     VkExternalMemoryBufferCreateInfo localExtBufCi;
5406 
5407     const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5408         vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5409     if (extBufCiPtr) {
5410         localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5411         vk_append_struct(&structChainIter, &localExtBufCi);
5412     }
5413 
5414     VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5415     const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5416         vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
5417     if (pCapAddrCi) {
5418         localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5419         vk_append_struct(&structChainIter, &localCapAddrCi);
5420     }
5421 
5422     VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5423     const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5424         vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
5425     if (pDevAddrCi) {
5426         localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5427         vk_append_struct(&structChainIter, &localDevAddrCi);
5428     }
5429 
5430 #ifdef VK_USE_PLATFORM_FUCHSIA
5431     std::optional<zx::vmo> vmo;
5432     bool isSysmemBackedMemory = false;
5433 
5434     if (extBufCiPtr &&
5435         (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5436         isSysmemBackedMemory = true;
5437     }
5438 
5439     const auto* extBufferCollectionPtr =
5440         vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(pCreateInfo);
5441 
5442     if (extBufferCollectionPtr) {
5443         const auto& collection =
5444             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5445                 extBufferCollectionPtr->collection);
5446         uint32_t index = extBufferCollectionPtr->index;
5447 
5448         auto result = collection->WaitForBuffersAllocated();
5449         if (result.ok() && result->status == ZX_OK) {
5450             auto& info = result->buffer_collection_info;
5451             if (index < info.buffer_count) {
5452                 vmo = std::make_optional<zx::vmo>(std::move(info.buffers[index].vmo));
5453             }
5454         } else {
5455             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
5456                       GET_STATUS_SAFE(result, status));
5457         }
5458 
5459         if (vmo && vmo->is_valid()) {
5460             fidl::Arena arena;
5461             fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5462             createParams.set_size(arena, pCreateInfo->size)
5463                 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5464 
5465             auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5466             if (!result.ok() ||
5467                 (result->is_error() && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
5468                 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
5469                           GET_STATUS_SAFE(result, error_value()));
5470             }
5471             isSysmemBackedMemory = true;
5472         }
5473     }
5474 #endif  // VK_USE_PLATFORM_FUCHSIA
5475 
5476     VkResult res;
5477     VkMemoryRequirements memReqs;
5478 
5479     if (supportsCreateResourcesWithRequirements()) {
5480         res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5481                                                         pBuffer, &memReqs, true /* do lock */);
5482     } else {
5483         res =
5484             enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5485     }
5486 
5487     if (res != VK_SUCCESS) return res;
5488 
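    // For external-memory buffers (AHardwareBuffer or dma-buf), fold the
    // color buffer memory index into the reported memoryTypeBits, presumably
    // so such buffers stay bindable to color-buffer-backed allocations.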
5489 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5490     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5491         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5492     }
5493     if (extBufCiPtr &&
5494         ((extBufCiPtr->handleTypes &
5495           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5496          (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5497         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5498     }
5499 #endif
5500 
5501     std::lock_guard<std::recursive_mutex> lock(mLock);
5502 
5503     auto it = info_VkBuffer.find(*pBuffer);
5504     if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5505 
5506     auto& info = it->second;
5507 
5508     info.createInfo = localCreateInfo;
5509     info.createInfo.pNext = nullptr;
5510 
5511     if (supportsCreateResourcesWithRequirements()) {
5512         info.baseRequirementsKnown = true;
5513         info.baseRequirements = memReqs;
5514     }
5515 
5516     if (extBufCiPtr) {
5517         info.external = true;
5518         info.externalCreateInfo = *extBufCiPtr;
5519     }
5520 
5521 #ifdef VK_USE_PLATFORM_FUCHSIA
5522     if (isSysmemBackedMemory) {
5523         info.isSysmemBackedMemory = true;
5524     }
5525 #endif
5526 
5527     return res;
5528 }
5529 
5530 void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5531                                          const VkAllocationCallbacks* pAllocator) {
5532     VkEncoder* enc = (VkEncoder*)context;
5533     enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5534 }
5535 
5536 void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
5537                                                        VkBuffer buffer,
5538                                                        VkMemoryRequirements* pMemoryRequirements) {
5539     std::unique_lock<std::recursive_mutex> lock(mLock);
5540 
5541     auto it = info_VkBuffer.find(buffer);
5542     if (it == info_VkBuffer.end()) return;
5543 
5544     auto& info = it->second;
5545 
5546     if (info.baseRequirementsKnown) {
5547         *pMemoryRequirements = info.baseRequirements;
5548         return;
5549     }
5550 
5551     lock.unlock();
5552 
5553     VkEncoder* enc = (VkEncoder*)context;
5554     enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);
5555 
5556     lock.lock();
5557 
5558     info.baseRequirementsKnown = true;
5559     info.baseRequirements = *pMemoryRequirements;
5560 }
5561 
5562 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5563     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5564     VkMemoryRequirements2* pMemoryRequirements) {
5565     VkEncoder* enc = (VkEncoder*)context;
5566     enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5567     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5568 }
5569 
5570 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5571     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5572     VkMemoryRequirements2* pMemoryRequirements) {
5573     VkEncoder* enc = (VkEncoder*)context;
5574     enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5575     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5576 }
5577 
5578 VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5579                                                 VkBuffer buffer, VkDeviceMemory memory,
5580                                                 VkDeviceSize memoryOffset) {
5581     VkEncoder* enc = (VkEncoder*)context;
5582     return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5583 }
5584 
5585 VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5586                                                  uint32_t bindInfoCount,
5587                                                  const VkBindBufferMemoryInfo* pBindInfos) {
5588     VkEncoder* enc = (VkEncoder*)context;
5589     return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5590 }
5591 
5592 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5593                                                     uint32_t bindInfoCount,
5594                                                     const VkBindBufferMemoryInfo* pBindInfos) {
5595     VkEncoder* enc = (VkEncoder*)context;
5596     return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5597 }
5598 
5599 VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
5600                                                VkDevice device,
5601                                                const VkSemaphoreCreateInfo* pCreateInfo,
5602                                                const VkAllocationCallbacks* pAllocator,
5603                                                VkSemaphore* pSemaphore) {
5604     (void)input_result;
5605     VkEncoder* enc = (VkEncoder*)context;
5606 
5607     VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5608 
5609     const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5610         vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);
5611 
5612 #ifdef VK_USE_PLATFORM_FUCHSIA
5613     bool exportEvent =
5614         exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5615                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);
5616 
5617     if (exportEvent) {
5618         finalCreateInfo.pNext = nullptr;
5619         // If we have timeline semaphores externally, leave it there.
5620         const VkSemaphoreTypeCreateInfo* typeCi =
5621             vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5622         if (typeCi) finalCreateInfo.pNext = typeCi;
5623     }
5624 #endif
5625 
5626 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5627     bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5628                                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5629 
5630     if (exportSyncFd) {
5631         finalCreateInfo.pNext = nullptr;
5632         // If we have timeline semaphores externally, leave it there.
5633         const VkSemaphoreTypeCreateInfo* typeCi =
5634             vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5635         if (typeCi) finalCreateInfo.pNext = typeCi;
5636     }
5637 #endif
5638     input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
5639                                           true /* do lock */);
5640 
5641     zx_handle_t event_handle = ZX_HANDLE_INVALID;
5642 
5643 #ifdef VK_USE_PLATFORM_FUCHSIA
5644     if (exportEvent) {
5645         zx_event_create(0, &event_handle);
5646     }
5647 #endif
5648 
5649     std::lock_guard<std::recursive_mutex> lock(mLock);
5650 
5651     auto it = info_VkSemaphore.find(*pSemaphore);
5652     if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5653 
5654     auto& info = it->second;
5655 
5656     info.device = device;
5657     info.eventHandle = event_handle;
5658 #ifdef VK_USE_PLATFORM_FUCHSIA
5659     info.eventKoid = getEventKoid(info.eventHandle);
5660 #endif
5661 
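    // An exportable sync-fd semaphore can be backed three ways: virtio-gpu
    // native sync (createFence below) when fence passing is unavailable,
    // host-side fence passing resolved at export time in
    // on_vkGetSemaphoreFdKHR, or the legacy goldfish sync device.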
5662 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5663     if (exportSyncFd) {
5664         if (mFeatureInfo.hasVirtioGpuNativeSync &&
5665             !(mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync)) {
5666             VkResult result;
5667             int64_t osHandle;
5668             uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5669 
5670             result = createFence(device, hostFenceHandle, osHandle);
5671             if (result != VK_SUCCESS) return result;
5672 
5673             info.syncFd.emplace(osHandle);
5674         } else {
5675 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5676             ensureSyncDeviceFd();
5677 
5678             if (exportSyncFd) {
5679                 int syncFd = -1;
5680                 goldfish_sync_queue_work(
5681                     mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5682                     GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */
5683                     ,
5684                     &syncFd);
5685                 info.syncFd.emplace(syncFd);
5686             }
5687 #endif
5688         }
5689     }
5690 #endif
5691 
5692     return VK_SUCCESS;
5693 }
5694 
5695 void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
5696                                             const VkAllocationCallbacks* pAllocator) {
5697     VkEncoder* enc = (VkEncoder*)context;
5698     enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5699 }
5700 
5701 // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
5702 // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5703 // of it to the application. To avoid leaking resources, the application must release ownership
5704 // of the file descriptor when it is no longer needed.
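// For example (hypothetical caller code, not part of this file):
//
//     VkSemaphoreGetFdInfoKHR getFdInfo = {
//         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
//         .semaphore = semaphore,
//         .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT};
//     int fd = -1;
//     vkGetSemaphoreFdKHR(device, &getFdInfo, &fd);
//     ...
//     close(fd);  // the caller owns the fd and must release it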
5705 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
5706                                                  const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5707                                                  int* pFd) {
5708 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5709     VkEncoder* enc = (VkEncoder*)context;
5710     bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5711 
5712     if (getSyncFd) {
5713         if (mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync) {
5714             uint64_t syncId = ++mAtomicId;
5715             int64_t osHandle = -1;
5716 
5717             VkResult result = enc->vkGetSemaphoreGOOGLE(device, pGetFdInfo->semaphore, syncId,
5718                                                         true /* do lock */);
5719             if (result != VK_SUCCESS) {
5720                 mesa_loge("unable to get the semaphore");
5721                 return result;
5722             }
5723 
5724             result = acquireSync(syncId, osHandle);
5725             if (result != VK_SUCCESS) {
5726                 mesa_loge("unable to create host sync object");
5727                 return result;
5728             }
5729 
5730             *pFd = (int)osHandle;
5731             return VK_SUCCESS;
5732         } else {
5733             // Doesn't this assume that the sync file descriptor generated via the
5734             // non-fence-passing path during "on_vkCreateSemaphore" is the same one
5735             // that would be generated via the guest's "vkGetSemaphoreFdKHR" call?
5736             std::lock_guard<std::recursive_mutex> lock(mLock);
5737             auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5738             if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5739             auto& semInfo = it->second;
5740             // syncFd is expected to have a value here.
5741             *pFd = mSyncHelper->dup(semInfo.syncFd.value_or(-1));
5742             return VK_SUCCESS;
5743         }
5744     } else {
5745         // opaque fd
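        // A host-side fd cannot cross the VMM boundary, so stash its value in
        // a guest-local anonymous file; on_vkImportSemaphoreFdKHR reads the
        // host fd back out of that file on import.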
5746         int hostFd = 0;
5747         int32_t size = 0;
5748         VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5749         if (result != VK_SUCCESS) {
5750             return result;
5751         }
5752         *pFd = os_create_anonymous_file(size, "vk_opaque_fd");
5753         write(*pFd, &hostFd, sizeof(hostFd));
5754         return VK_SUCCESS;
5755     }
5756 #else
5757     (void)context;
5758     (void)device;
5759     (void)pGetFdInfo;
5760     (void)pFd;
5761     return VK_ERROR_INCOMPATIBLE_DRIVER;
5762 #endif
5763 }
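
// Illustrative sketch (not part of this file): how an application might use
// vkGetSemaphoreFdKHR per the ownership rule quoted above. The device and
// semaphore are assumed valid and exportable; error handling is abbreviated.
#if 0
#include <unistd.h>
static void exampleExportAndReleaseSyncFd(VkDevice device, VkSemaphore semaphore) {
    VkSemaphoreGetFdInfoKHR getFdInfo = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
        .semaphore = semaphore,
        .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
    };
    int fd = -1;
    if (vkGetSemaphoreFdKHR(device, &getFdInfo, &fd) == VK_SUCCESS && fd >= 0) {
        // Each call returns a new fd owned by the application; release it
        // when no longer needed to avoid leaking the handle.
        close(fd);
    }
}
#endif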
5764 
5765 VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
5766     void* context, VkResult input_result, VkDevice device,
5767     const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
5768 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5769     VkEncoder* enc = (VkEncoder*)context;
5770     if (input_result != VK_SUCCESS) {
5771         return input_result;
5772     }
5773 
5774     if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
5775         VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5776 
5777         std::lock_guard<std::recursive_mutex> lock(mLock);
5778 
5779         auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
5780         auto& info = semaphoreIt->second;
5781 
5782         if (info.syncFd.value_or(-1) >= 0) {
5783             mSyncHelper->close(info.syncFd.value());
5784         }
5785 
5786         info.syncFd.emplace(pImportSemaphoreFdInfo->fd);
5787 
5788         return VK_SUCCESS;
5789     } else {
5790         int fd = pImportSemaphoreFdInfo->fd;
5791         int err = lseek(fd, 0, SEEK_SET);
5792         if (err == -1) {
5793             mesa_loge("lseek fail on import semaphore");
5794         }
5795         int hostFd = 0;
5796         read(fd, &hostFd, sizeof(hostFd));
5797         VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5798         tmpInfo.fd = hostFd;
5799         VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
5800         mSyncHelper->close(fd);
5801         return result;
5802     }
5803 #else
5804     (void)context;
5805     (void)input_result;
5806     (void)device;
5807     (void)pImportSemaphoreFdInfo;
5808     return VK_ERROR_INCOMPATIBLE_DRIVER;
5809 #endif
5810 }
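
// Minimal sketch of the opaque-fd transport used by the export/import pair
// above, under the assumption (taken from the code) that the guest-visible fd
// is an anonymous file whose payload is the host-side fd value. Helper names
// are hypothetical.
#if 0
#include <unistd.h>
static int examplePackHostFd(int hostFd) {
    // Export side: stash the host fd value behind a guest fd.
    int fd = os_create_anonymous_file(sizeof(hostFd), "vk_opaque_fd");
    if (fd >= 0) write(fd, &hostFd, sizeof(hostFd));
    return fd;
}
static int exampleUnpackHostFd(int fd) {
    // Import side: rewind and read the host fd value back out.
    int hostFd = -1;
    if (lseek(fd, 0, SEEK_SET) != -1) read(fd, &hostFd, sizeof(hostFd));
    return hostFd;
}
#endif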
5811 
5812 VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR(
5813     void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
5814     VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
5815 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5816     if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
5817         mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n",
5818                   __func__, handleType);
5819         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
5820     }
5821     // Sanity-check device
5822     std::lock_guard<std::recursive_mutex> lock(mLock);
5823     auto deviceIt = info_VkDevice.find(device);
5824     if (deviceIt == info_VkDevice.end()) {
5825         return VK_ERROR_OUT_OF_HOST_MEMORY;
5826     }
5827     // TODO: Verify FD valid ?
5828     (void)fd;
5829 
5830     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5831         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5832     }
5833 
5834     updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits,
5835                          mCaps.vulkanCapset.colorBufferMemoryIndex);
5836 
5837     return VK_SUCCESS;
5838 #else
5839     (void)context;
5840     (void)device;
5841     (void)handleType;
5842     (void)fd;
5843     (void)pMemoryFdProperties;
5844     return VK_ERROR_INCOMPATIBLE_DRIVER;
5845 #endif
5846 }
5847 
5848 VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
5849                                               const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
5850 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5851     if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
5852     if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;
5853 
5854     if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
5855                                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5856         mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__,
5857                   pGetFdInfo->handleType);
5858         return VK_ERROR_OUT_OF_HOST_MEMORY;
5859     }
5860     // Sanity-check device
5861     std::lock_guard<std::recursive_mutex> lock(mLock);
5862     auto deviceIt = info_VkDevice.find(device);
5863     if (deviceIt == info_VkDevice.end()) {
5864         return VK_ERROR_OUT_OF_HOST_MEMORY;
5865     }
5866 
5867     auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
5868     if (deviceMemIt == info_VkDeviceMemory.end()) {
5869         return VK_ERROR_OUT_OF_HOST_MEMORY;
5870     }
5871     auto& info = deviceMemIt->second;
5872 
5873     if (!info.blobPtr) {
5874         mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
5875         return VK_ERROR_OUT_OF_HOST_MEMORY;
5876     }
5877 
5878     VirtGpuExternalHandle handle{};
5879     int ret = info.blobPtr->exportBlob(handle);
5880     if (ret != 0 || handle.osHandle < 0) {
5881         mesa_loge("%s: Failed to export host resource to FD.\n", __func__);
5882         return VK_ERROR_OUT_OF_HOST_MEMORY;
5883     }
5884     *pFd = handle.osHandle;
5885     return VK_SUCCESS;
5886 #else
5887     (void)context;
5888     (void)device;
5889     (void)pGetFdInfo;
5890     (void)pFd;
5891     return VK_ERROR_INCOMPATIBLE_DRIVER;
5892 #endif
5893 }
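
// Illustrative app-side sketch of the dma-buf export path handled above;
// assumes the memory was allocated with a VkExportMemoryAllocateInfo
// requesting VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT.
#if 0
static int exampleExportDmaBuf(VkDevice device, VkDeviceMemory memory) {
    VkMemoryGetFdInfoKHR getFdInfo = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
        .memory = memory,
        .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
    };
    int fd = -1;
    // On success the caller owns fd (backed by the exported blob resource).
    return vkGetMemoryFdKHR(device, &getFdInfo, &fd) == VK_SUCCESS ? fd : -1;
}
#endif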
5894 
5895 void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
5896     void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
5897     if (workingSet.empty()) return;
5898 
5899     std::vector<VkCommandBuffer> nextLevel;
5900     for (auto commandBuffer : workingSet) {
5901         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5902         forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
5903             nextLevel.push_back((VkCommandBuffer)secondary);
5904         });
5905     }
5906 
5907     flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);
5908 
5909     // After this point, everyone at the previous level has been flushed
5910     for (auto cmdbuf : workingSet) {
5911         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
5912 
5913         // There are no pending commands here; skip. (case 1)
5914         if (!cb->privateStream) continue;
5915 
5916         unsigned char* writtenPtr = 0;
5917         size_t written = 0;
5918         CommandBufferStagingStream* cmdBufStream =
5919             static_cast<CommandBufferStagingStream*>(cb->privateStream);
5920         cmdBufStream->getWritten(&writtenPtr, &written);
5921 
5922         // There are no pending commands here; skip. (case 2: stream created but no new recordings)
5923         if (!written) continue;
5924 
5925         // There are pending commands to flush.
5926         VkEncoder* enc = (VkEncoder*)context;
5927         VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
5928         VkDeviceSize dataOffset = 0;
5929         if (mFeatureInfo.hasVulkanAuxCommandMemory) {
5930             // For suballocations, deviceMemory is an alias VkDeviceMemory;
5931             // get the underlying VkDeviceMemory for the given alias.
5932             deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
5933                                          1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
5934                                          nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
5935                                          nullptr /*typeBits*/, 0 /*typeBitCounts*/);
5936 
5937             // mark stream as flushing before flushing commands
5938             cmdBufStream->markFlushing();
5939             enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
5940                                                          written, true /*do lock*/);
5941         } else {
5942             enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
5943                                             true /* do lock */);
5944         }
5945         // Reset this stream.
5946         // Flushing happens on vkQueueSubmit. The Vulkan API states that on queue
5947         // submit, applications MUST NOT attempt to modify the command buffer in any
5948         // way, as the device may be processing the commands recorded to it. It is
5949         // safe to call reset() here for this reason. The command buffer associated
5950         // with this stream will only leave the pending state after queue submit is
5951         // complete and the host has read the data.
5953         cmdBufStream->reset();
5954     }
5955 }
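
// Condensed sketch of the bottom-up traversal above: recursion descends into
// secondary command buffers first, so the deepest level flushes before its
// parents. The Node type here is hypothetical.
#if 0
struct Node {
    std::vector<Node*> children;
    void flush();
};
static void exampleFlushBottomUp(const std::vector<Node*>& level) {
    if (level.empty()) return;
    std::vector<Node*> next;
    for (auto* n : level)
        for (auto* child : n->children) next.push_back(child);
    exampleFlushBottomUp(next);        // descendants flush first
    for (auto* n : level) n->flush();  // then the current level
}
#endif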
5956 
5957 uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
5958     if (!supportsAsyncQueueSubmit()) {
5959         return 0;
5960     }
5961 
5962     struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
5963     if (!q) return 0;
5964 
5965     auto lastEncoder = q->lastUsedEncoder;
5966 
5967     if (lastEncoder == currentEncoder) return 0;
5968 
5969     currentEncoder->incRef();
5970 
5971     q->lastUsedEncoder = currentEncoder;
5972 
5973     if (!lastEncoder) return 0;
5974 
5975     auto oldSeq = q->sequenceNumber;
5976     q->sequenceNumber += 2;
5977     lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
5978     lastEncoder->flush();
5979     currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);
5980 
5981     if (lastEncoder->decRef()) {
5982         q->lastUsedEncoder = nullptr;
5983     }
5984 
5985     return 0;
5986 }
5987 
5988 template <class VkSubmitInfoType>
5989 void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
5990                                           const VkSubmitInfoType* pSubmits) {
5991     std::vector<VkCommandBuffer> toFlush;
5992     for (uint32_t i = 0; i < submitCount; ++i) {
5993         for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
5994             toFlush.push_back(getCommandBuffer(pSubmits[i], j));
5995         }
5996     }
5997 
5998     std::unordered_set<VkDescriptorSet> pendingSets;
5999     collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
6000     commitDescriptorSetUpdates(context, queue, pendingSets);
6001 
6002     flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
6003 
6004     for (auto cb : toFlush) {
6005         resetCommandBufferPendingTopology(cb);
6006     }
6007 }
6008 
6009 VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
6010                                            uint32_t submitCount, const VkSubmitInfo* pSubmits,
6011                                            VkFence fence) {
6012     MESA_TRACE_SCOPE("on_vkQueueSubmit");
6013 
6014     /* From the Vulkan 1.3.204 spec:
6015      *
6016      *    VUID-VkSubmitInfo-pNext-03240
6017      *
6018      *    "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
6019      *    and any element of pSignalSemaphores was created with a VkSemaphoreType of
6020      *    VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
6021      *    signalSemaphoreCount"
6022      *
6023      * Internally, Mesa WSI creates placeholder semaphores/fences (see transformVkSemaphore functions
6024      * in gfxstream_vk_private.cpp).  We don't want to forward that to the host, since there is
6025      * no host side Vulkan object associated with the placeholder sync objects.
6026      *
6027      * The way to test this behavior is Zink + glxgears, on Linux hosts.  It should fail without
6028      * this check.
6029      */
6030     for (uint32_t i = 0; i < submitCount; i++) {
6031         VkTimelineSemaphoreSubmitInfo* tssi = const_cast<VkTimelineSemaphoreSubmitInfo*>(
6032             vk_find_struct<VkTimelineSemaphoreSubmitInfo>(&pSubmits[i]));
6033 
6034         if (tssi) {
6035             uint32_t count = getSignalSemaphoreCount(pSubmits[i]);
6036             if (count != tssi->signalSemaphoreValueCount) {
6037                 tssi->signalSemaphoreValueCount = count;
6038             }
6039         }
6040     }
6041 
6042     return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
6043                                                   pSubmits, fence);
6044 }
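
// Hypothetical example of the mismatch that the VUID check above repairs:
// Mesa WSI appends a placeholder binary signal semaphore, so
// signalSemaphoreCount (2) no longer matches the app-provided
// signalSemaphoreValueCount (1) until it is patched. Values are illustrative.
#if 0
static void exampleMismatchedTimelineSubmit(VkSemaphore* semaphores /* timeline + placeholder */,
                                            const uint64_t* timelineValue) {
    VkTimelineSemaphoreSubmitInfo tssi = {
        .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
        .signalSemaphoreValueCount = 1,  // app counted only its timeline semaphore
        .pSignalSemaphoreValues = timelineValue,
    };
    VkSubmitInfo si = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = &tssi,
        .signalSemaphoreCount = 2,  // timeline + WSI placeholder binary semaphore
        .pSignalSemaphores = semaphores,
    };
    (void)si;  // on_vkQueueSubmit bumps signalSemaphoreValueCount to 2 before forwarding
}
#endif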
6045 
6046 VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
6047                                             uint32_t submitCount, const VkSubmitInfo2* pSubmits,
6048                                             VkFence fence) {
6049     MESA_TRACE_SCOPE("on_vkQueueSubmit2");
6050     return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
6051                                                    pSubmits, fence);
6052 }
6053 
6054 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
6055                                            const VkSubmitInfo* pSubmits, VkFence fence) {
6056     if (supportsAsyncQueueSubmit()) {
6057         enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
6058         return VK_SUCCESS;
6059     } else {
6060         return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
6061     }
6062 }
6063 
6064 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
6065                                            const VkSubmitInfo2* pSubmits, VkFence fence) {
6066     if (supportsAsyncQueueSubmit()) {
6067         enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
6068         return VK_SUCCESS;
6069     } else {
6070         return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
6071     }
6072 }
6073 
6074 template <typename VkSubmitInfoType>
6075 VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
6076                                                    VkQueue queue, uint32_t submitCount,
6077                                                    const VkSubmitInfoType* pSubmits,
6078                                                    VkFence fence) {
6079     flushStagingStreams(context, queue, submitCount, pSubmits);
6080 
6081     std::vector<VkSemaphore> pre_signal_semaphores;
6082     std::vector<zx_handle_t> pre_signal_events;
6083     std::vector<int> pre_signal_sync_fds;
6084     std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
6085     std::vector<int> post_wait_sync_fds;
6086 
6087     VkEncoder* enc = (VkEncoder*)context;
6088 
6089     std::unique_lock<std::recursive_mutex> lock(mLock);
6090 
6091     for (uint32_t i = 0; i < submitCount; ++i) {
6092         for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
6093             VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
6094             auto it = info_VkSemaphore.find(semaphore);
6095             if (it != info_VkSemaphore.end()) {
6096                 auto& semInfo = it->second;
6097 #ifdef VK_USE_PLATFORM_FUCHSIA
6098                 if (semInfo.eventHandle) {
6099                     pre_signal_events.push_back(semInfo.eventHandle);
6100                     pre_signal_semaphores.push_back(semaphore);
6101                 }
6102 #endif
6103 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
6104                 if (semInfo.syncFd.has_value()) {
6105                     pre_signal_sync_fds.push_back(semInfo.syncFd.value());
6106                     pre_signal_semaphores.push_back(semaphore);
6107                 }
6108 #endif
6109             }
6110         }
6111         for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
6112             auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
6113             if (it != info_VkSemaphore.end()) {
6114                 auto& semInfo = it->second;
6115 #ifdef VK_USE_PLATFORM_FUCHSIA
6116                 if (semInfo.eventHandle) {
6117                     post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
6118 #ifndef FUCHSIA_NO_TRACE
6119                     if (semInfo.eventKoid != ZX_KOID_INVALID) {
6120                         // TODO(fxbug.dev/42144867): Remove the "semaphore"
6121                         // FLOW_END events once it is removed from clients
6122                         // (for example, gfx Engine).
6123                         TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
6124                         TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
6125                     }
6126 #endif
6127                 }
6128 #endif
6129 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
6130                 if (semInfo.syncFd.value_or(-1) >= 0) {
6131                     post_wait_sync_fds.push_back(semInfo.syncFd.value());
6132                 }
6133 #endif
6134             }
6135         }
6136     }
6137     lock.unlock();
6138 
6139     if (pre_signal_semaphores.empty()) {
6140         input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
6141         if (input_result != VK_SUCCESS) return input_result;
6142     } else {
6143         // Schedule waits on the OS external objects and signal the wait
6144         // semaphores in a separate thread.
6146 #ifdef VK_USE_PLATFORM_FUCHSIA
6147         for (auto event : pre_signal_events) {
6148             preSignalTasks.push_back([event] {
6149                 zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
6150             });
6151         }
6152 #endif
6153 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
6154         for (auto fd : pre_signal_sync_fds) {
6155             // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
6156             // fd == -1 is treated as already signaled
6157             if (fd != -1) {
6158                 mSyncHelper->wait(fd, 3000);
6159 #if GFXSTREAM_SYNC_DEBUG
6160                 mSyncHelper->debugPrint(fd);
6161 #endif
6162             }
6163         }
6164 #endif
6165         // Use the old version of VkSubmitInfo
6166         VkSubmitInfo submit_info = {
6167             .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
6168             .waitSemaphoreCount = 0,
6169             .pWaitSemaphores = nullptr,
6170             .pWaitDstStageMask = nullptr,
6171             .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
6172             .pSignalSemaphores = pre_signal_semaphores.data()};
6173         vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
6174         input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
6175         if (input_result != VK_SUCCESS) return input_result;
6176     }
6177     lock.lock();
6178     int externalFenceFdToSignal = -1;
6179 
6180 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
6181     if (fence != VK_NULL_HANDLE) {
6182         auto it = info_VkFence.find(fence);
6183         if (it != info_VkFence.end()) {
6184             const auto& info = it->second;
6185             if (info.syncFd && *info.syncFd >= 0) {
6186                 externalFenceFdToSignal = *info.syncFd;
6187             }
6188         }
6189     }
6190 #endif
6191     if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
6192         auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
6193         auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
6194         auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
6195 #ifdef VK_USE_PLATFORM_FUCHSIA
6196             MESA_TRACE_SCOPE("on_vkQueueSubmit::SignalSemaphores");
6197             (void)externalFenceFdToSignal;
6198             for (auto& [event, koid] : post_wait_events) {
6199 #ifndef FUCHSIA_NO_TRACE
6200                 if (koid != ZX_KOID_INVALID) {
6201                     TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
6202                     TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
6203                 }
6204 #endif
6205                 zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
6206             }
6207 #endif
6208 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
6209             for (auto& fd : post_wait_sync_fds) {
6210                 goldfish_sync_signal(fd);
6211             }
6212 
6213             if (externalFenceFdToSignal >= 0) {
6214                 mesa_logd("%s: external fence real signal: %d\n", __func__,
6215                           externalFenceFdToSignal);
6216                 goldfish_sync_signal(externalFenceFdToSignal);
6217             }
6218 #endif
6219     }
6220     return VK_SUCCESS;
6221 }
6222 
6223 VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
6224     VkEncoder* enc = (VkEncoder*)context;
6225 
6226     // now done waiting, get the host's opinion
6227     return enc->vkQueueWaitIdle(queue, true /* do lock */);
6228 }
6229 
6230 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6231 void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
6232                                                    VkNativeBufferANDROID* outputNativeInfo) {
6233     if (!inputNativeInfo || !inputNativeInfo->handle) {
6234         return;
6235     }
6236 
6237     if (!outputNativeInfo || !outputNativeInfo->handle) {
6238         mesa_loge("FATAL: Local native buffer info not properly allocated!");
6239         abort();
6240     }
6241 
6242     const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
6243     *(uint32_t*)(outputNativeInfo->handle) = mGralloc->getHostHandle(nativeHandle);
6244 }
6245 
6246 void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
6247     const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
6248     VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
6249     if (!inputBimsi || !inputBimsi->swapchain) {
6250         return;
6251     }
6252 
6253     if (!outputBimsi || !outputBimsi->swapchain) {
6254         return;
6255     }
6256 
6257     // Android based swapchains are implemented by the Android framework's
6258     // libvulkan. They only exist within the guest and should not be sent to
6259     // the host.
6260     outputBimsi->swapchain = VK_NULL_HANDLE;
6261 }
6262 #endif
6263 
6264 void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
6265                                                        VkImageCreateInfo* local_pCreateInfo) {
6266 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6267     const VkNativeBufferANDROID* inputNativeInfo =
6268         vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
6269 
6270     VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
6271         vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo));
6272 
6273     unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6274 #endif
6275 }
6276 
6277 void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
6278 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6279     (void)fd_out;
6280     if (fd != -1) {
6281         MESA_TRACE_SCOPE("waitNativeFenceInAcquire");
6282         // Implicit Synchronization
6283         mSyncHelper->wait(fd, 3000);
6284         // From libvulkan's swapchain.cpp:
6285         // """
6286         // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
6287         // even if the call fails. We could close it ourselves on failure, but
6288         // that would create a race condition if the driver closes it on a
6289         // failure path: some other thread might create an fd with the same
6290         // number between the time the driver closes it and the time we close
6291         // it. We must assume one of: the driver *always* closes it even on
6292         // failure, or *never* closes it on failure.
6293         // """
6294         // Therefore, assume a contract where we need to close the fd in this driver.
6295 
6296 #if GFXSTREAM_SYNC_DEBUG
6297         mSyncHelper->debugPrint(fd);
6298 #endif
6299         mSyncHelper->close(fd);
6300     }
6301 #endif
6302 }
6303 
6304 void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
6305     uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
6306     VkBindImageMemoryInfo* outputBindInfos) {
6307 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6308     for (uint32_t i = 0; i < bindInfoCount; ++i) {
6309         const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
6310         VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];
6311 
6312         const VkNativeBufferANDROID* inputNativeInfo =
6313             vk_find_struct<VkNativeBufferANDROID>(inputBindInfo);
6314 
6315         VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
6316             vk_find_struct<VkNativeBufferANDROID>(outputBindInfo));
6317 
6318         unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6319 
6320         const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
6321             vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo);
6322 
6323         VkBindImageMemorySwapchainInfoKHR* outputBimsi =
6324             const_cast<VkBindImageMemorySwapchainInfoKHR*>(
6325                 vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo));
6326 
6327         unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
6328     }
6329 #endif
6330 }
6331 
6332 // Action of vkMapMemoryIntoAddressSpaceGOOGLE:
6333 // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
6334 //    uses address space device to reserve the right size of
6335 //    memory.
6336 // 2. the reservation results in a physical address. the physical
6337 //    address is set as |*pAddress|.
6338 // 3. after pre, the API call is encoded to the host, where the
6339 //    value of pAddress is also sent (the physical address).
6340 // 4. the host will obtain the actual gpu pointer and send it
6341 //    back out in |*pAddress|.
6342 // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
6343 //    using the mmap() method of GoldfishAddressSpaceBlock to obtain
6344 //    a pointer in guest userspace corresponding to the host pointer.
6345 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
6346                                                                    VkDeviceMemory memory,
6347                                                                    uint64_t* pAddress) {
6348     std::lock_guard<std::recursive_mutex> lock(mLock);
6349 
6350     auto it = info_VkDeviceMemory.find(memory);
6351     if (it == info_VkDeviceMemory.end()) {
6352         return VK_ERROR_OUT_OF_HOST_MEMORY;
6353     }
6354 
6355 #if defined(__ANDROID__)
6356     auto& memInfo = it->second;
6357 
6358     GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
6359     block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);
6360 
6361     memInfo.goldfishBlock = block;
6362     *pAddress = block->physAddr();
6363 
6364     return VK_SUCCESS;
6365 #else
6366     (void)pAddress;
6367     return VK_ERROR_MEMORY_MAP_FAILED;
6368 #endif
6369 }
6370 
6371 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
6372                                                                VkDevice, VkDeviceMemory memory,
6373                                                                uint64_t* pAddress) {
6374     (void)memory;
6375     (void)pAddress;
6376 
6377     if (input_result != VK_SUCCESS) {
6378         return input_result;
6379     }
6380 
6381     return input_result;
6382 }
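
// Condensed guest-side sketch of the five-step flow described above. The
// provider, size, and mmap usage are assumptions based on the pre-step in
// this file; names are illustrative only.
#if 0
static uint64_t exampleMapIntoAddressSpace(VkDevice device, VkDeviceMemory memory,
                                           GoldfishAddressSpaceBlockProvider* provider,
                                           uint64_t coherentSize) {
    auto block = std::make_shared<GoldfishAddressSpaceBlock>();
    block->allocate(provider, coherentSize);   // 1. reserve an address range
    uint64_t addr = block->physAddr();         // 2. physical address of the block
    // 3./4. encode to host; host rewrites addr with the actual gpu pointer
    vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, &addr);
    block->mmap(addr);                         // 5. map it into guest userspace
    return addr;
}
#endif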
6383 
6384 VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
6385     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6386     VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
6387     std::lock_guard<std::recursive_mutex> lock(mLock);
6388 
6389     auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6390     if (it == info_VkDescriptorUpdateTemplate.end()) {
6391         return VK_ERROR_INITIALIZATION_FAILED;
6392     }
6393 
6394     auto& info = it->second;
6395     uint32_t inlineUniformBlockBufferSize = 0;
6396 
6397     for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6398         const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6399         uint32_t descCount = entry.descriptorCount;
6400         VkDescriptorType descType = entry.descriptorType;
6401         ++info.templateEntryCount;
6402         if (isDescriptorTypeInlineUniformBlock(descType)) {
6403             inlineUniformBlockBufferSize += descCount;
6404             ++info.inlineUniformBlockCount;
6405         } else {
6406             for (uint32_t j = 0; j < descCount; ++j) {
6407                 if (isDescriptorTypeImageInfo(descType)) {
6408                     ++info.imageInfoCount;
6409                 } else if (isDescriptorTypeBufferInfo(descType)) {
6410                     ++info.bufferInfoCount;
6411                 } else if (isDescriptorTypeBufferView(descType)) {
6412                     ++info.bufferViewCount;
6413                 } else {
6414                     mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6415                     // abort();
6416                 }
6417             }
6418         }
6419     }
6420 
6421     if (info.templateEntryCount)
6422         info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];
6423 
6424     if (info.imageInfoCount) {
6425         info.imageInfoIndices = new uint32_t[info.imageInfoCount];
6426         info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
6427     }
6428 
6429     if (info.bufferInfoCount) {
6430         info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
6431         info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
6432     }
6433 
6434     if (info.bufferViewCount) {
6435         info.bufferViewIndices = new uint32_t[info.bufferViewCount];
6436         info.bufferViews = new VkBufferView[info.bufferViewCount];
6437     }
6438 
6439     if (info.inlineUniformBlockCount) {
6440         info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
6441         info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
6442     }
6443 
6444     uint32_t imageInfoIndex = 0;
6445     uint32_t bufferInfoIndex = 0;
6446     uint32_t bufferViewIndex = 0;
6447     uint32_t inlineUniformBlockIndex = 0;
6448 
6449     for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6450         const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6451         uint32_t descCount = entry.descriptorCount;
6452         VkDescriptorType descType = entry.descriptorType;
6453 
6454         info.templateEntries[i] = entry;
6455 
6456         if (isDescriptorTypeInlineUniformBlock(descType)) {
6457             info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
6458             ++inlineUniformBlockIndex;
6459         } else {
6460             for (uint32_t j = 0; j < descCount; ++j) {
6461                 if (isDescriptorTypeImageInfo(descType)) {
6462                     info.imageInfoIndices[imageInfoIndex] = i;
6463                     ++imageInfoIndex;
6464                 } else if (isDescriptorTypeBufferInfo(descType)) {
6465                     info.bufferInfoIndices[bufferInfoIndex] = i;
6466                     ++bufferInfoIndex;
6467                 } else if (isDescriptorTypeBufferView(descType)) {
6468                     info.bufferViewIndices[bufferViewIndex] = i;
6469                     ++bufferViewIndex;
6470                 } else {
6471                     mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6472                     // abort();
6473                 }
6474             }
6475         }
6476     }
6477 
6478     return VK_SUCCESS;
6479 }
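
// Hypothetical template showing how the counting pass above sizes the staging
// arrays: these two entries would yield templateEntryCount == 2,
// imageInfoCount == 2, and bufferInfoCount == 1.
#if 0
static const VkDescriptorUpdateTemplateEntry kExampleEntries[] = {
    {.dstBinding = 0,
     .descriptorCount = 2,
     .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
     .offset = 0,
     .stride = sizeof(VkDescriptorImageInfo)},
    {.dstBinding = 1,
     .descriptorCount = 1,
     .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
     .offset = 2 * sizeof(VkDescriptorImageInfo),
     .stride = sizeof(VkDescriptorBufferInfo)},
};
#endif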
6480 
6481 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
6482     void* context, VkResult input_result, VkDevice device,
6483     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6484     const VkAllocationCallbacks* pAllocator,
6485     VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6486     (void)context;
6487     (void)device;
6488     (void)pAllocator;
6489 
6490     if (input_result != VK_SUCCESS) return input_result;
6491 
6492     return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6493 }
6494 
6495 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
6496     void* context, VkResult input_result, VkDevice device,
6497     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6498     const VkAllocationCallbacks* pAllocator,
6499     VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6500     (void)context;
6501     (void)device;
6502     (void)pAllocator;
6503 
6504     if (input_result != VK_SUCCESS) return input_result;
6505 
6506     return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6507 }
6508 
6509 void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
6510     void* context, VkDevice device, VkDescriptorSet descriptorSet,
6511     VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6512     VkEncoder* enc = (VkEncoder*)context;
6513 
6514     uint8_t* userBuffer = (uint8_t*)pData;
6515     if (!userBuffer) return;
6516 
6517     // TODO: Make this thread safe
6518     std::unique_lock<std::recursive_mutex> lock(mLock);
6519 
6520     auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6521     if (it == info_VkDescriptorUpdateTemplate.end()) {
6522         return;
6523     }
6524 
6525     auto& info = it->second;
6526 
6527     uint32_t templateEntryCount = info.templateEntryCount;
6528     VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;
6529 
6530     uint32_t imageInfoCount = info.imageInfoCount;
6531     uint32_t bufferInfoCount = info.bufferInfoCount;
6532     uint32_t bufferViewCount = info.bufferViewCount;
6533     uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
6534     uint32_t* imageInfoIndices = info.imageInfoIndices;
6535     uint32_t* bufferInfoIndices = info.bufferInfoIndices;
6536     uint32_t* bufferViewIndices = info.bufferViewIndices;
6537     VkDescriptorImageInfo* imageInfos = info.imageInfos;
6538     VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
6539     VkBufferView* bufferViews = info.bufferViews;
6540     uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
6541     uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();
6542 
6543     lock.unlock();
6544 
6545     size_t currImageInfoOffset = 0;
6546     size_t currBufferInfoOffset = 0;
6547     size_t currBufferViewOffset = 0;
6548     size_t inlineUniformBlockOffset = 0;
6549     size_t inlineUniformBlockIdx = 0;
6550 
6551     struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
6552     ReifiedDescriptorSet* reified = ds->reified;
6553 
6554     bool batched = mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate;
6555 
6556     for (uint32_t i = 0; i < templateEntryCount; ++i) {
6557         const auto& entry = templateEntries[i];
6558         VkDescriptorType descType = entry.descriptorType;
6559         uint32_t dstBinding = entry.dstBinding;
6560 
6561         auto offset = entry.offset;
6562         auto stride = entry.stride;
6563         auto dstArrayElement = entry.dstArrayElement;
6564 
6565         uint32_t descCount = entry.descriptorCount;
6566 
6567         if (isDescriptorTypeImageInfo(descType)) {
6568             if (!stride) stride = sizeof(VkDescriptorImageInfo);
6569 
6570             const VkDescriptorImageInfo* currImageInfoBegin =
6571                 (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);
6572 
6573             for (uint32_t j = 0; j < descCount; ++j) {
6574                 const VkDescriptorImageInfo* user =
6575                     (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);
6576 
6577                 memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
6578                        sizeof(VkDescriptorImageInfo));
6579                 currImageInfoOffset += sizeof(VkDescriptorImageInfo);
6580             }
6581 
6582             if (batched) {
6583                 doEmulatedDescriptorImageInfoWriteFromTemplate(
6584                     descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
6585             }
6586         } else if (isDescriptorTypeBufferInfo(descType)) {
6587             if (!stride) stride = sizeof(VkDescriptorBufferInfo);
6588 
6589             const VkDescriptorBufferInfo* currBufferInfoBegin =
6590                 (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);
6591 
6592             for (uint32_t j = 0; j < descCount; ++j) {
6593                 const VkDescriptorBufferInfo* user =
6594                     (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);
6595 
6596                 memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
6597                        sizeof(VkDescriptorBufferInfo));
6598 
6599                 // TODO(b/355497683): move this into gfxstream_vk_UpdateDescriptorSetWithTemplate().
6600 #if defined(__linux__) || defined(VK_USE_PLATFORM_ANDROID_KHR)
6601                 // Convert mesa to internal for objects in the user buffer
6602                 VkDescriptorBufferInfo* internalBufferInfo =
6603                     (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
6604                 VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
6605                 internalBufferInfo->buffer = gfxstream_buffer->internal_object;
6606 #endif
6607                 currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
6608             }
6609 
6610             if (batched) {
6611                 doEmulatedDescriptorBufferInfoWriteFromTemplate(
6612                     descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
6613             }
6614 
6615         } else if (isDescriptorTypeBufferView(descType)) {
6616             if (!stride) stride = sizeof(VkBufferView);
6617 
6618             const VkBufferView* currBufferViewBegin =
6619                 (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);
6620 
6621             for (uint32_t j = 0; j < descCount; ++j) {
6622                 const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);
6623 
6624                 memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
6625                 currBufferViewOffset += sizeof(VkBufferView);
6626             }
6627 
6628             if (batched) {
6629                 doEmulatedDescriptorBufferViewWriteFromTemplate(
6630                     descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
6631             }
6632         } else if (isDescriptorTypeInlineUniformBlock(descType)) {
6633             uint32_t inlineUniformBlockBytesPerBlock =
6634                 inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
6635             uint8_t* currInlineUniformBlockBufferBegin =
6636                 inlineUniformBlockBuffer + inlineUniformBlockOffset;
6637             memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
6638                    inlineUniformBlockBytesPerBlock);
6639             inlineUniformBlockIdx++;
6640             inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;
6641 
6642             if (batched) {
6643                 doEmulatedDescriptorInlineUniformBlockFromTemplate(
6644                     descType, dstBinding, dstArrayElement, descCount,
6645                     currInlineUniformBlockBufferBegin, reified);
6646             }
6647         } else {
6648             mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6649             abort();
6650         }
6651     }
6652 
6653     if (batched) return;
6654 
6655     enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
6656         device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
6657         bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
6658         imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
6659         bufferViews, inlineUniformBlockBuffer, true /* do lock */);
6660 }
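
// Minimal sketch of the offset/stride walk performed above: descriptor j of a
// template entry lives in the user blob at entry.offset + j * entry.stride,
// with a zero stride treated as tightly packed.
#if 0
static const VkDescriptorImageInfo* exampleNthImageInfo(
    const void* pData, const VkDescriptorUpdateTemplateEntry& entry, uint32_t j) {
    size_t stride = entry.stride ? entry.stride : sizeof(VkDescriptorImageInfo);
    return (const VkDescriptorImageInfo*)((const uint8_t*)pData + entry.offset + j * stride);
}
#endif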
6661 
6662 void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR(
6663     void* context, VkDevice device, VkDescriptorSet descriptorSet,
6664     VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6665     on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate,
6666                                          pData);
6667 }
6668 
6669 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
6670     bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6671     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6672     VkImageFormatProperties2* pImageFormatProperties) {
6673     VkEncoder* enc = (VkEncoder*)context;
6674     (void)input_result;
6675 
6676     VkPhysicalDeviceImageFormatInfo2 localImageFormatInfo = *pImageFormatInfo;
6677 
6678     uint32_t supportedHandleType = 0;
6679     VkExternalImageFormatProperties* ext_img_properties =
6680         vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);
6681 
6682 #ifdef VK_USE_PLATFORM_FUCHSIA
6683 
6684     constexpr VkFormat kExternalImageSupportedFormats[] = {
6685         VK_FORMAT_B8G8R8A8_SINT,  VK_FORMAT_B8G8R8A8_UNORM,   VK_FORMAT_B8G8R8A8_SRGB,
6686         VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
6687         VK_FORMAT_R8G8B8A8_SINT,  VK_FORMAT_R8G8B8A8_UNORM,   VK_FORMAT_R8G8B8A8_SRGB,
6688         VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
6689         VK_FORMAT_R8_UNORM,       VK_FORMAT_R8_UINT,          VK_FORMAT_R8_USCALED,
6690         VK_FORMAT_R8_SNORM,       VK_FORMAT_R8_SINT,          VK_FORMAT_R8_SSCALED,
6691         VK_FORMAT_R8_SRGB,        VK_FORMAT_R8G8_UNORM,       VK_FORMAT_R8G8_UINT,
6692         VK_FORMAT_R8G8_USCALED,   VK_FORMAT_R8G8_SNORM,       VK_FORMAT_R8G8_SINT,
6693         VK_FORMAT_R8G8_SSCALED,   VK_FORMAT_R8G8_SRGB,
6694     };
6695 
6696     if (ext_img_properties) {
6697         if (std::find(std::begin(kExternalImageSupportedFormats),
6698                       std::end(kExternalImageSupportedFormats),
6699                       pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
6700             return VK_ERROR_FORMAT_NOT_SUPPORTED;
6701         }
6702     }
6703     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6704 #endif
6705 
6706 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6707     VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
6708         vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
6709     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6710                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6711 #endif
6712     const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
6713         vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
6714     if (supportedHandleType && ext_img_info) {
6715         // 0 is a valid handleType so we don't check against 0
6716         if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
6717             return VK_ERROR_FORMAT_NOT_SUPPORTED;
6718         }
6719     }
6720 
6721 #ifdef LINUX_GUEST_BUILD
6722     VkImageDrmFormatModifierExplicitCreateInfoEXT localDrmFormatModifierInfo;
6723 
6724     const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* drmFmtMod =
6725         vk_find_struct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>(pImageFormatInfo);
6726     VkDrmFormatModifierPropertiesListEXT* emulatedDrmFmtModPropsList = nullptr;
6727     if (drmFmtMod) {
6728         if (getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) != -1) {
6729             // Host supports DRM format modifiers => leave the input unchanged.
6730         } else {
6731             mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_LINEAR");
6732             emulatedDrmFmtModPropsList =
6733                 vk_find_struct<VkDrmFormatModifierPropertiesListEXT>(pImageFormatProperties);
6734 
6735             // Host doesn't support DRM format modifiers, try emulating.
6736             if (drmFmtMod) {
6737                 if (drmFmtMod->drmFormatModifier == DRM_FORMAT_MOD_LINEAR) {
6738                     localImageFormatInfo.tiling = VK_IMAGE_TILING_LINEAR;
6739                     pImageFormatInfo = &localImageFormatInfo;
6740                     // Leave drmFormatMod in the input; it should be ignored when
6741                     // tiling is not VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT
6742                 } else {
6743                     return VK_ERROR_FORMAT_NOT_SUPPORTED;
6744                 }
6745             }
6746         }
6747     }
6748 #endif  // LINUX_GUEST_BUILD
6749 
6750     VkResult hostRes;
6751 
6752     if (isKhr) {
6753         hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
6754             physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
6755     } else {
6756         hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
6757             physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
6758     }
6759 
6760     if (hostRes != VK_SUCCESS) return hostRes;
6761 
6762 #ifdef LINUX_GUEST_BUILD
6763     if (emulatedDrmFmtModPropsList) {
6764         VkFormatProperties formatProperties;
6765         enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, localImageFormatInfo.format,
6766                                                  &formatProperties, true /* do lock */);
6767 
6768         emulatedDrmFmtModPropsList->drmFormatModifierCount = 1;
6769         if (emulatedDrmFmtModPropsList->pDrmFormatModifierProperties) {
6770             emulatedDrmFmtModPropsList->pDrmFormatModifierProperties[0] = {
6771                 .drmFormatModifier = DRM_FORMAT_MOD_LINEAR,
6772                 .drmFormatModifierPlaneCount = 1,
6773                 .drmFormatModifierTilingFeatures = formatProperties.linearTilingFeatures,
6774             };
6775         }
6776     }
6777 #endif  // LINUX_GUEST_BUILD
6778 
6779 #ifdef VK_USE_PLATFORM_FUCHSIA
6780     if (ext_img_properties) {
6781         if (ext_img_info) {
6782             if (static_cast<uint32_t>(ext_img_info->handleType) ==
6783                 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
6784                 ext_img_properties->externalMemoryProperties = {
6785                     .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
6786                                               VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
6787                     .exportFromImportedHandleTypes =
6788                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6789                     .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6790                 };
6791             }
6792         }
6793     }
6794 #endif
6795 
6796 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6797     if (output_ahw_usage) {
6798         output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
6799             pImageFormatInfo->flags, pImageFormatInfo->usage);
6800     }
6801 #endif
6802     if (ext_img_properties) {
6803         transformImpl_VkExternalMemoryProperties_fromhost(
6804             &ext_img_properties->externalMemoryProperties, 0);
6805     }
6806     return hostRes;
6807 }
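
// Example query (app side, hypothetical values) that exercises the LINEAR
// emulation path above when the host lacks VK_EXT_image_drm_format_modifier.
#if 0
static VkResult exampleQueryLinearModifier(VkPhysicalDevice physicalDevice) {
    VkPhysicalDeviceImageDrmFormatModifierInfoEXT modInfo = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT,
        .drmFormatModifier = DRM_FORMAT_MOD_LINEAR,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    };
    VkPhysicalDeviceImageFormatInfo2 info = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
        .pNext = &modInfo,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .type = VK_IMAGE_TYPE_2D,
        .tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,
        .usage = VK_IMAGE_USAGE_SAMPLED_BIT,
    };
    VkImageFormatProperties2 props = {.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2};
    return vkGetPhysicalDeviceImageFormatProperties2(physicalDevice, &info, &props);
}
#endif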
6808 
6809 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
6810     void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6811     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6812     VkImageFormatProperties2* pImageFormatProperties) {
6813     return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6814         false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6815         pImageFormatProperties);
6816 }
6817 
6818 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
6819     void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6820     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6821     VkImageFormatProperties2* pImageFormatProperties) {
6822     return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6823         true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6824         pImageFormatProperties);
6825 }
6826 
6827 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
6828     bool isKhr, void* context, VkPhysicalDevice physicalDevice,
6829     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6830     VkExternalBufferProperties* pExternalBufferProperties) {
6831     VkEncoder* enc = (VkEncoder*)context;
6832 
6833 #if defined(ANDROID)
6834     // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
6835     // with GPU usage (b/299520213).
6836     if (mGralloc->treatBlobAsImage() &&
6837         pExternalBufferInfo->handleType ==
6838             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
6839         pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
6840         pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
6841         pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
6842         return;
6843     }
6844 #endif
6845 
6846     uint32_t supportedHandleType = 0;
6847 #ifdef VK_USE_PLATFORM_FUCHSIA
6848     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6849 #endif
6850 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6851     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6852                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6853 #endif
6854     if (supportedHandleType) {
6855         // 0 is a valid handleType so we can't check against 0
6856         if (pExternalBufferInfo->handleType !=
6857             (pExternalBufferInfo->handleType & supportedHandleType)) {
6858             return;
6859         }
6860     }
6861 
6862     if (isKhr) {
6863         enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6864             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6865     } else {
6866         enc->vkGetPhysicalDeviceExternalBufferProperties(
6867             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6868     }
6869     transformImpl_VkExternalMemoryProperties_fromhost(
6870         &pExternalBufferProperties->externalMemoryProperties, 0);
6871 }
6872 
6873 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
6874     void* context, VkPhysicalDevice physicalDevice,
6875     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6876     VkExternalBufferProperties* pExternalBufferProperties) {
6877     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6878         false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
6879         pExternalBufferProperties);
6880 }
6881 
6882 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6883     void* context, VkPhysicalDevice physicalDevice,
6884     const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
6885     VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
6886     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6887         true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
6888 }
6889 
void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    (void)pExternalSemaphoreInfo;
    (void)pExternalSemaphoreProperties;
#ifdef VK_USE_PLATFORM_FUCHSIA
    if (pExternalSemaphoreInfo->handleType ==
        static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#else
    const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
        vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo);
    bool isSemaphoreTimeline =
        semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
    if (isSemaphoreTimeline) {
        // b/304373623
        // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
        pExternalSemaphoreProperties->compatibleHandleTypes = 0;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
        pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
    } else if (pExternalSemaphoreInfo->handleType ==
               VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#endif  // VK_USE_PLATFORM_FUCHSIA
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
}

void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
                                                     CleanupCallback callback) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& callbacks = mEncoderCleanupCallbacks[encoder];
    callbacks[object] = callback;
}

void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    mEncoderCleanupCallbacks[encoder].erase(object);
}

void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
    std::unique_lock<std::recursive_mutex> lock(mLock);
    if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;

    std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];

    mEncoderCleanupCallbacks.erase(encoder);
    lock.unlock();

    // Run the copied callbacks outside of mLock; a callback may re-enter the
    // tracker (e.g. to unregister other objects).
    for (auto it : callbackCopies) {
        it.second();
    }
}

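// Illustrative sketch (not in the original source): a hypothetical owner of a
// per-encoder resource would pair these entry points as follows:
//
//   tracker->registerEncoderCleanupCallback(enc, myObject, [myObject] {
//       releaseEncoderScopedState(myObject);  // hypothetical helper
//   });
//   // ...
//   tracker->unregisterEncoderCleanupCallback(enc, myObject);  // normal teardown
//
// If the encoder is destroyed first, onEncoderDeleted() invokes the callback
// on the owner's behalf instead.
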
CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
    if (mFeatureInfo.hasVulkanAuxCommandMemory) {
        return [this](size_t size) -> CommandBufferStagingStream::Memory {
            VkMemoryAllocateInfo info{
                .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                .pNext = nullptr,
                .allocationSize = size,
                .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
            };

            auto enc = ResourceTracker::getThreadLocalEncoder();
            VkDevice device = VK_NULL_HANDLE;
            VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
            VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
            if (result != VK_SUCCESS) {
                mesa_loge("Failed to get coherent memory %u", result);
                return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
            }

            // getCoherentMemory() uses suballocations.
            // To retrieve the suballocated memory address, look up the
            // VkDeviceMemory filled in by getCoherentMemory().
            // scope of mLock
            {
                std::lock_guard<std::recursive_mutex> lock(mLock);
                const auto it = info_VkDeviceMemory.find(vkDeviceMem);
                if (it == info_VkDeviceMemory.end()) {
                    mesa_loge("Allocated coherent memory not found");
                    return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                }

                const auto& info = it->second;
                return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
            }
        };
    }
    return nullptr;
}

CommandBufferStagingStream::Free ResourceTracker::getFree() {
    if (mFeatureInfo.hasVulkanAuxCommandMemory) {
        return [this](const CommandBufferStagingStream::Memory& memory) {
            // deviceMemory may not be the actual backing auxiliary VkDeviceMemory.
            // For suballocations, deviceMemory is an alias VkDeviceMemory handle;
            // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory.
            VkDeviceMemory deviceMemory = memory.deviceMemory;
            std::unique_lock<std::recursive_mutex> lock(mLock);
            auto it = info_VkDeviceMemory.find(deviceMemory);
            if (it == info_VkDeviceMemory.end()) {
                mesa_loge("Device memory to free not found");
                return;
            }
            auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
            // We have to release the lock before we could possibly free a
            // CoherentMemory, because that will call into VkEncoder, which
            // shouldn't be called when the lock is held.
            lock.unlock();
            coherentMemory = nullptr;
        };
    }
    return nullptr;
}

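// Illustrative sketch (not in the original source): when auxiliary command
// memory is enabled, the staging stream is expected to drive the pair above
// roughly like this (stagingSizeBytes is hypothetical):
//
//   CommandBufferStagingStream::Alloc alloc = tracker->getAlloc();
//   CommandBufferStagingStream::Free release = tracker->getFree();
//   if (alloc && release) {
//       auto mem = alloc(stagingSizeBytes);  // coherent, host-visible suballocation
//       if (mem.ptr) {
//           // ... encode commands directly into mem.ptr ...
//           release(mem);  // returns the suballocation via its alias handle
//       }
//   }
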
VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
                                                  VkCommandBuffer commandBuffer,
                                                  const VkCommandBufferBeginInfo* pBeginInfo) {
    (void)context;

    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
    (void)input_result;

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    cb->flags = pBeginInfo->flags;

    VkCommandBufferBeginInfo modifiedBeginInfo;

    // pInheritanceInfo is ignored for primary command buffers, so drop it
    // rather than encoding a potentially dangling pointer.
    if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
        modifiedBeginInfo = *pBeginInfo;
        modifiedBeginInfo.pInheritanceInfo = nullptr;
        pBeginInfo = &modifiedBeginInfo;
    }

    if (!supportsDeferredCommands()) {
        return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
    }

    enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
                                                VkCommandBuffer commandBuffer) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    if (!supportsDeferredCommands()) {
        return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
    }

    enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
                                                  VkCommandBuffer commandBuffer,
                                                  VkCommandBufferResetFlags flags) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    if (!supportsDeferredCommands()) {
        VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                      true /* also clear pending descriptor sets */);
        return res;
    }

    enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);
    return VK_SUCCESS;
}

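// Illustrative note (not in the original source): begin/end/reset above share
// the same deferred-command pattern. When supportsDeferredCommands() is true,
// the guest encodes the *AsyncGOOGLE variant, skips the host round trip, and
// reports VK_SUCCESS immediately; host-side failures surface at a later
// synchronization point instead. A minimal sketch of the pattern:
//
//   if (!supportsDeferredCommands())
//       return enc->vkFooCommandBuffer(cb, true /* do lock */);   // sync, real result
//   enc->vkFooCommandBufferAsyncGOOGLE(cb, true /* do lock */);   // fire and forget
//   return VK_SUCCESS;
//
// (vkFooCommandBuffer is a placeholder for the begin/end/reset entry points.)
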
VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
                                               VkDevice device,
                                               const VkImageViewCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator,
                                               VkImageView* pView) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
        std::lock_guard<std::recursive_mutex> lock(mLock);

        auto it = info_VkImage.find(pCreateInfo->image);
        if (it != info_VkImage.end() && it->second.hasExternalFormat) {
            localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat);
        }
    }
    VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
    const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
        vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
    if (samplerYcbcrConversionInfo) {
        if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
            localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
            vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
        }
    }
#endif

    return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
}

void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
                                              uint32_t commandBufferCount,
                                              const VkCommandBuffer* pCommandBuffers) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
        enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                                  true /* do lock */);
        return;
    }

    struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* secondary =
            as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&secondary->superObjects, primary);
        appendObject(&primary->subObjects, secondary);
    }

    enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                              true /* do lock */);
}

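// Illustrative note (not in the original source): the super/sub links recorded
// above form a bidirectional graph between primaries and secondaries, which
// resetCommandBufferStagingInfo() later walks. Roughly:
//
//   primary.subObjects      = { secondaryA, secondaryB }  // who it executes
//   secondaryA.superObjects = { primary }                 // who executes it
//
// so resetting a secondary can also invalidate the primaries recorded with it.
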
void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
                                                 VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t descriptorSetCount,
                                                 const VkDescriptorSet* pDescriptorSets,
                                                 uint32_t dynamicOffsetCount,
                                                 const uint32_t* pDynamicOffsets) {
    VkEncoder* enc = (VkEncoder*)context;

    if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)
        addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);

    enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
                                 descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
                                 pDynamicOffsets, true /* do lock */);
}

void ResourceTracker::on_vkCmdPipelineBarrier(
    void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
    VkEncoder* enc = (VkEncoder*)context;

    std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
    updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
        VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // Unfortunately, Android does not yet have a mechanism for sharing the expected
        // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
        // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
        // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
        // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
        // that range may be discarded." Some Vulkan drivers have been observed to actually
        // perform the discard, which leads to AHardwareBuffer-s being unintentionally
        // cleared. See go/ahb-vkimagelayout for more information.
        if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
            (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
             barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
            barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // This is not a complete solution, as the Vulkan spec does not require
            // drivers to perform a no-op when oldLayout equals newLayout, but it has
            // been observed to be enough to avoid clearing out images for now.
            // TODO(b/236179843): figure out long term solution.
            barrier.oldLayout = barrier.newLayout;
        }
#endif

        updatedImageMemoryBarriers.push_back(barrier);
    }

    enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                              memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                              pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
                              updatedImageMemoryBarriers.data(), true /* do lock */);
}

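// Illustrative sketch (not in the original source): an acquire barrier that an
// AHardwareBuffer importer typically records, and how the loop above rewrites
// it before encoding:
//
//   VkImageMemoryBarrier b = {
//       .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
//       .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,               // would allow discard
//       .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//       .srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
//       .dstQueueFamilyIndex = 0,
//       /* image, subresourceRange, access masks elided */};
//   // After the rewrite, b.oldLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//   // preserving the AHardwareBuffer contents across the acquire.
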
void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
                                                      VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks* pAllocator) {
    decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context, VkResult input_result, VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res =
        enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
    if (VK_SUCCESS != res) return res;

    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
        cb->device = device;
    }

    return res;
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
    mesa_logd("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
              (unsigned long long)get_host_u64_VkImage(image));

    if (mFeatureInfo.hasVirtioGpuNativeSync) {
        struct VirtGpuExecBuffer exec = {};
        struct gfxstreamCreateQSRIExportVK exportQSRI = {};
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();

        uint64_t hostImageHandle = get_host_u64_VkImage(image);

        exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
        exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
        exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);

        exec.command = static_cast<void*>(&exportQSRI);
        exec.command_size = sizeof(exportQSRI);
        exec.flags = kFenceOut | kRingIdx;
        if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

        *fd = exec.handle.osHandle;
    } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
        ensureSyncDeviceFd();
        goldfish_sync_queue_work(
            mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
            GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
#endif
    }

    mesa_logd("%s: got fd: %d\n", __func__, *fd);
    auto imageInfoIt = info_VkImage.find(image);
    if (imageInfoIt != info_VkImage.end()) {
        auto& imageInfo = imageInfoIt->second;

        // Remove any pending QSRI sync fds that are already signaled.
        auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
        while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
            int syncFd = *syncFdIt;
            int syncWaitRet = mSyncHelper->wait(syncFd, /*timeout msecs*/ 0);
            if (syncWaitRet == 0) {
                // Sync fd is signaled.
                syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
                mSyncHelper->close(syncFd);
            } else {
                if (errno != ETIME) {
                    mesa_loge("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
                              __func__, strerror(errno), errno);
                }
                break;
            }
        }

        int syncFdDup = mSyncHelper->dup(*fd);
        if (syncFdDup < 0) {
            mesa_loge("%s: Failed to dup() QSRI sync fd : strerror: %s errno: %d", __func__,
                      strerror(errno), errno);
        } else {
            imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
        }
    }

    return VK_SUCCESS;
}

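// Illustrative note (not in the original source): each QSRI export produces a
// sync fd that signals when the host finishes the queue-signal-release. The
// dup()'d copy kept in pendingQsriSyncFds lets later operations on the image
// (e.g. teardown) wait for outstanding presents, while the original fd is
// handed back to the caller as the native fence.
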
VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
                                                              VkQueue queue,
                                                              uint32_t waitSemaphoreCount,
                                                              const VkSemaphore* pWaitSemaphores,
                                                              VkImage image, int* pNativeFenceFd) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo.hasVulkanAsyncQsri) {
        return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, pNativeFenceFd, true /* lock */);
    }

    {
        std::lock_guard<std::recursive_mutex> lock(mLock);
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) {
            if (pNativeFenceFd) *pNativeFenceFd = -1;
            return VK_ERROR_INITIALIZATION_FAILED;
        }
    }

    enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, true /* lock */);

    std::lock_guard<std::recursive_mutex> lock(mLock);
    VkResult result;
    if (pNativeFenceFd) {
        result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
    } else {
        // Initialize to -1 so a failed export doesn't leave a garbage fd to close.
        int syncFd = -1;
        result = exportSyncFdForQSRILocked(image, &syncFd);

        if (syncFd >= 0) {
            mSyncHelper->close(syncFd);
        }
    }

    return result;
}
#endif

VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif  // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // Fragment shader state has the same inclusion requirement as pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}

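// Illustrative note (not in the original source): the pruning above exists
// because apps may legally pass garbage pointers in fields the spec ignores.
// For example, with rasterization discard enabled pViewportState is ignored:
//
//   VkGraphicsPipelineCreateInfo ci = { /* ... */ };
//   // rasterizerDiscardEnable == VK_TRUE, so pViewportState is ignored:
//   ci.pViewportState = (const VkPipelineViewportStateCreateInfo*)0xdeadbeef;  // legal
//
// Such pointers must never be dereferenced; nulling the ignored state keeps
// the serializer from walking them.
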
uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    uint32_t api = kDefaultApiVersion;

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return api;

    api = it->second.highestApiVersion;

    return api;
}

uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    uint32_t api = kDefaultApiVersion;

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return api;

    api = it->second.apiVersion;

    return api;
}

bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return nullptr;
    }
    return cb->device;
}

// Resets the staging stream for this command buffer and for the primary command
// buffers where this command buffer has been recorded. If requested, also clears
// the pending descriptor sets.
void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
                                                    bool alsoResetPrimaries,
                                                    bool alsoClearPendingDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return;
    }
    if (cb->privateEncoder) {
        sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
        cb->privateEncoder = nullptr;
        cb->privateStream = nullptr;
    }

    if (alsoClearPendingDescriptorSets && cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        pendingSets->sets.clear();
    }

    if (alsoResetPrimaries) {
        forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
                                         alsoClearPendingDescriptorSets](void* obj) {
            VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
            this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
                                                alsoClearPendingDescriptorSets);
        });
        eraseObjects(&cb->superObjects);
    }

    forAllObjects(cb->subObjects, [cb](void* obj) {
        VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
        struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
        // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
        // since the user still might have submittable stuff pending there.
        eraseObject(&subCb->superObjects, (void*)cb);
    });

    eraseObjects(&cb->subObjects);
}

// Unlike resetCommandBufferStagingInfo, this does not always erase its
// superObjects pointers because the command buffer has merely been
// submitted, not reset. However, if the command buffer was recorded with
// ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
//
// Also, we save the set of descriptor sets referenced by this command
// buffer because we only submitted the command buffer, and it is possible to
// update the descriptor sets again and re-submit the same command buffer
// without re-recording it (update-after-bind descriptor sets).
void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
        resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
                                      true /* clear pending descriptor sets */);
    } else {
        resetCommandBufferStagingInfo(commandBuffer, false /* don't reset primaries */,
                                      false /* don't clear pending descriptor sets */);
    }
}

void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);

    if (!p) return;

    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
                                            true /* also reset primaries */,
                                            true /* also clear pending descriptor sets */);
    });
}

void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
                                       VkCommandBuffer* pCommandBuffers) {
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
        appendObject(&cb->poolObjects, (void*)commandPool);
    }
}

void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
    resetCommandPoolStagingInfo(commandPool);
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
    });
    eraseObjects(&p->subObjects);
}

const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            std::lock_guard<std::recursive_mutex> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                mesa_loge("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}

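// Illustrative note (not in the original source): physical device memory
// properties are immutable for the device's lifetime, so the first query's
// result is cached in mCachedPhysicalDeviceMemoryProps and every later call
// returns the cached reference without another trip to the host encoder.
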
static ResourceTracker* sTracker = nullptr;

ResourceTracker::ResourceTracker() {
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}

ResourceTracker::~ResourceTracker() {
    delete mCreateMapping;
    delete mDestroyMapping;
}

VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }

VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }

// static
ResourceTracker* ResourceTracker::get() {
    if (!sTracker) {
        // To be initialized once on vulkan device open.
        sTracker = new ResourceTracker;
    }
    return sTracker;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
    VkCommandBuffer commandBuffer) {
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        auto enc = ResourceTracker::getThreadLocalEncoder();
        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
        return enc;
    }

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb->privateEncoder) {
        sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
                              ResourceTracker::get()->getFree());
        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
    }
    uint8_t* writtenPtr;
    size_t written;
    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
    return cb->privateEncoder;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
    auto enc = ResourceTracker::getThreadLocalEncoder();
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        ResourceTracker::get()->syncEncodersForQueue(queue, enc);
    }
    return enc;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
    return vkEncoder;
}

// static
void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
    uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
    return res;
}

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
    uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
    return res;
}

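// Illustrative note (not in the original source): sSeqnoPtr refers to a
// counter shared with the host side of the stream, so both accessors go
// through __ATOMIC_SEQ_CST operations. A hypothetical encode step claims a
// slot via
//
//   uint32_t seqno = ResourceTracker::nextSeqno();  // unique, monotonically increasing
//
// while getSeqno() provides a consistent snapshot of the latest claimed value
// for ordering checks.
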
void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
                                                                      uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) {
}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}

#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                  \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

}  // namespace vk
}  // namespace gfxstream