/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkImage.h"

#include "include/core/SkSize.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkUniformHandler.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#include "src/gpu/vk/VulkanMemory.h"
#include "src/gpu/vk/VulkanMutableTextureStatePriv.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#include <string.h>
#include <functional>
#include <utility>

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

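// Creates the image backing a stencil attachment. Stencil attachments are always budgeted,
// unprotected, and single-mip; VK_IMAGE_USAGE_TRANSFER_DST_BIT is included so the attachment
// can also be written by transfer commands (e.g. clears).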
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           skgpu::Budgeted::kYes);
}

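// Creates the image backing an MSAA color attachment. A memoryless attachment is marked
// transient so the allocator can back it with lazily allocated (e.g. tile-local) memory, and
// it deliberately omits the transfer bits since its contents can never be copied to or from.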
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           skgpu::Budgeted::kYes);
}

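// Creates the image backing a sampled texture, optionally renderable. Renderable textures also
// get input-attachment usage so they can later be read as subpass inputs (see
// inputDescSetForBlending and inputDescSetForMSAALoad below).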
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        skgpu::Budgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments.
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

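// Creates the image views required by the requested usages: a single-mip framebuffer view for
// color/stencil attachment use and a full-mip-chain texture view for sampling. Returns false
// if any required view cannot be created.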
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages.
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachment views can only have one mip level.
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

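// Shared creation path: builds a VkImage with bound memory via InitImageInfo, creates the
// views the requested usages need, and wraps everything in a GrVkImage that owns them.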
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 skgpu::Budgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    auto mutableState = sk_make_sp<skgpu::MutableTextureState>(
            skgpu::MutableTextureStates::MakeVulkan(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted,
                                          /*label=*/"MakeVkImage"));
}

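// Wraps an externally created VkImage. When the image is only wrapped for use with an external
// secondary command buffer we skip view creation, since we never sample or attach it directly.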
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<skgpu::MutableTextureState> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        std::string_view label,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
            ? GrBackendObjectOwnership::kBorrowed
            : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB,
                                          label));
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     skgpu::Budgeted budgeted,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label,
                       info.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag
                               ? GrMemoryless::kYes
                               : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->registerWithCache(budgeted);
}

GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<skgpu::MutableTextureState> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB,
                     std::string_view label)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo,
                       info.fProtected,
                       label)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

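// Common constructor tail: debug-validates that fInfo's usage flags and queue family are a
// combination we support, then creates the Resource managing the VkImage's lifetime (none for
// secondary-command-buffer wrappers, a BorrowedResource for client-owned images, or a full
// Resource that destroys the image and frees its memory).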
void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(skgpu::MutableTextureStates::GetVkImageLayout(fMutableState.get()) ==
             fInfo.fImageLayout);
    SkASSERT(skgpu::MutableTextureStates::GetVkQueueFamilyIndex(fMutableState.get()) ==
             fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer an image from a non-graphics queue to the graphics queue, since we
    // can't release the image from the original queue without having that queue. This limits
    // the types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

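// Maps an image layout to the pipeline stages whose work must complete before leaving that
// layout; used as the srcStageMask of a layout-transition barrier.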
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

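// Maps an image layout to the write accesses that may have occurred while in it; used as the
// srcAccessMask of a layout-transition barrier so prior writes are made available.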
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes), so we ignore
    // VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access host memory if we are in the preinitialized or general
    // layout and the image is linear.
    // TODO: Add a linear check here so we are not always adding host access to the general
    // layout; we should also only be in the preinitialized layout if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // These are read-only layouts; there are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

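// Derives the image aspects a barrier must cover from the format: stencil-only, combined
// depth/stencil, or color for all other formats.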
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT:  // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

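// Records a pipeline barrier transitioning this image to newLayout and, if requested,
// transferring it to newQueueFamilyIndex. Redundant transitions (same read-only layout, same
// queue) are skipped. A typical call site, sketched here rather than taken from this file,
// prepares an image for fragment-shader sampling:
//
//   image->setImageLayoutAndQueueIndex(gpu,
//                                      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//                                      VK_ACCESS_SHADER_READ_BIT,
//                                      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                                      /*byRegion=*/false,
//                                      VK_QUEUE_FAMILY_IGNORED);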
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
    // Enable the following block to test new devices and confirm that their lazily allocated
    // images stay at zero memory use.
#if 0
    if (fInfo.fAlloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layouts are the same and the layout is read-only, there is no need
    // for a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        currentQueueIndex,                           // srcQueueFamilyIndex
        newQueueFamilyIndex,                         // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

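// Creates a VkImage matching imageDesc, allocates memory for it (optionally protected,
// dedicated, or lazily allocated), and binds the two together. Fills out `info` on success;
// on failure every partially created object is destroyed and false is returned.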
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) &&
        !gpu->vkCaps().supportsProtectedContent()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create the image.
    VkSampleCountFlagBits vkSamples;
    if (!skgpu::SampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,          // sType
        nullptr,                                      // pNext
        createflags,                                  // VkImageCreateFlags
        imageDesc.fImageType,                         // VkImageType
        imageDesc.fFormat,                            // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },   // VkExtent3D
        imageDesc.fLevels,                            // mipLevels
        1,                                            // arrayLayers
        vkSamples,                                    // samples
        imageDesc.fImageTiling,                       // VkImageTiling
        imageDesc.fUsageFlags,                        // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                    // VkSharingMode
        0,                                            // queueFamilyIndexCount
        nullptr,                                      // pQueueFamilyIndices
        initialLayout                                 // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    skgpu::Protected isProtected = gpu->protectedContext() ? skgpu::Protected::kYes
                                                           : skgpu::Protected::kNo;
    bool forceDedicatedMemory = gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory();
    bool useLazyAllocation =
            SkToBool(imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);

    auto checkResult = [gpu, isProtected, forceDedicatedMemory, useLazyAllocation](
                               VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocImageMemory"
                                 " (isProtected:%d, forceDedicatedMemory:%d, useLazyAllocation:%d)",
                                 (int)isProtected, (int)forceDedicatedMemory,
                                 (int)useLazyAllocation);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    skgpu::VulkanAlloc alloc;
    if (!skgpu::VulkanMemory::AllocImageMemory(allocator,
                                               image,
                                               isProtected,
                                               forceDedicatedMemory,
                                               useLazyAllocation,
                                               checkResult,
                                               &alloc) ||
        (useLazyAllocation &&
         !SkToBool(alloc.fFlags & skgpu::VulkanAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    // Bind the image to its memory.
    GR_VK_CALL_RESULT(gpu, result, BindImageMemory(gpu->device(),
                                                   image,
                                                   alloc.fMemory,
                                                   alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeImageMemory(allocator, alloc);
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(gpu->memoryAllocator(), info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // Everything should have been released first.
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

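// Transitions the image for presentation and returns it to the queue family it was created or
// wrapped with. If the device supports swapchains we move to PRESENT_SRC_KHR; otherwise only
// the queue transfer is performed.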
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<RefCntedReleaseProc> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource.
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();
    VK_CALL(fGpu, DestroyImage(fGpu->device(), fImage, nullptr));
    skgpu::VulkanMemory::FreeImageMemory(fGpu->memoryAllocator(), fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

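// Points the input-attachment binding of `descSet` at `view` in the given layout. A minimal
// sketch of the shader side this pairs with, assuming a Vulkan GLSL fragment shader and using
// 0 as an illustrative literal for GrVkUniformHandler::kInputBinding:
//
//   layout (input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput uDstInput;
//   // ...
//   vec4 dstColor = subpassLoad(uDstInput);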
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

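// Lazily creates and caches the input-attachment descriptor set used to read this image as the
// blend destination. The view is bound in VK_IMAGE_LAYOUT_GENERAL since, for dst reads during
// blending, the image is being rendered to and read from in the same subpass.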
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

#if defined(GPU_TEST_UTILS)
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    skgpu::MutableTextureStates::SetVkQueueFamilyIndex(fMutableState.get(), gpu->queueIndex());
}
#endif
