1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
9
10 #include "include/core/SkDrawable.h"
11 #include "include/core/SkRect.h"
12 #include "include/core/SkSize.h"
13 #include "include/gpu/ganesh/GrBackendSurface.h"
14 #include "include/gpu/ganesh/GrDirectContext.h"
15 #include "include/gpu/ganesh/GrTypes.h"
16 #include "include/gpu/ganesh/vk/GrBackendDrawableInfo.h"
17 #include "include/gpu/ganesh/vk/GrVkTypes.h"
18 #include "include/private/base/SkAssert.h"
19 #include "include/private/base/SkTo.h"
20 #include "include/private/gpu/ganesh/GrTypesPriv.h"
21 #include "src/gpu/GpuRefCnt.h"
22 #include "src/gpu/ganesh/GrAttachment.h"
23 #include "src/gpu/ganesh/GrBackendUtils.h"
24 #include "src/gpu/ganesh/GrBuffer.h"
25 #include "src/gpu/ganesh/GrCaps.h"
26 #include "src/gpu/ganesh/GrDirectContextPriv.h"
27 #include "src/gpu/ganesh/GrDrawIndirectCommand.h"
28 #include "src/gpu/ganesh/GrGpuBuffer.h"
29 #include "src/gpu/ganesh/GrNativeRect.h"
30 #include "src/gpu/ganesh/GrOpFlushState.h"
31 #include "src/gpu/ganesh/GrPipeline.h"
32 #include "src/gpu/ganesh/GrProgramInfo.h"
33 #include "src/gpu/ganesh/GrRenderTarget.h"
34 #include "src/gpu/ganesh/GrScissorState.h"
35 #include "src/gpu/ganesh/GrSurfaceProxy.h"
36 #include "src/gpu/ganesh/GrTexture.h"
37 #include "src/gpu/ganesh/effects/GrTextureEffect.h"
38 #include "src/gpu/ganesh/vk/GrVkCaps.h"
39 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
40 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
41 #include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
42 #include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
43 #include "src/gpu/ganesh/vk/GrVkGpu.h"
44 #include "src/gpu/ganesh/vk/GrVkImage.h"
45 #include "src/gpu/ganesh/vk/GrVkPipeline.h"
46 #include "src/gpu/ganesh/vk/GrVkPipelineState.h"
47 #include "src/gpu/ganesh/vk/GrVkRenderPass.h"
48 #include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
49 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
50 #include "src/gpu/ganesh/vk/GrVkTexture.h"
51
52 #include <algorithm>
53 #include <cstring>
54 #include <functional>
55 #include <utility>
56
57 class GrGpu;
58
59 using namespace skia_private;
60
61 /////////////////////////////////////////////////////////////////////////////
62
get_vk_load_store_ops(GrLoadOp loadOpIn,GrStoreOp storeOpIn,VkAttachmentLoadOp * loadOp,VkAttachmentStoreOp * storeOp)63 void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
64 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
65 switch (loadOpIn) {
66 case GrLoadOp::kLoad:
67 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
68 break;
69 case GrLoadOp::kClear:
70 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
71 break;
72 case GrLoadOp::kDiscard:
73 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
74 break;
75 default:
76 SK_ABORT("Invalid LoadOp");
77 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
78 }
79
80 switch (storeOpIn) {
81 case GrStoreOp::kStore:
82 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
83 break;
84 case GrStoreOp::kDiscard:
85 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
86 break;
87 default:
88 SK_ABORT("Invalid StoreOp");
89 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
90 }
91 }
92
GrVkOpsRenderPass(GrVkGpu * gpu)93 GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}
94
// Transitions the color, resolve, and stencil attachments into the image
// layouts this render pass expects, issuing the required barriers before the
// pass is begun.
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // We need input access to do the shader read and color read access to do the attachment
            // load.
            VkAccessFlags dstAccess =
                    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
            VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              dstAccess,
                                              dstStages,
                                              false);
        } else {
            // Not loading: the resolve image is only written as a color attachment.
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}
162
// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
adjust_bounds_to_granularity(SkIRect * dstBounds,const SkIRect & srcBounds,const VkExtent2D & granularity,int maxWidth,int maxHeight)166 void adjust_bounds_to_granularity(SkIRect* dstBounds,
167 const SkIRect& srcBounds,
168 const VkExtent2D& granularity,
169 int maxWidth,
170 int maxHeight) {
171 // Adjust Width
172 if ((0 != granularity.width && 1 != granularity.width)) {
173 // Start with the right side of rect so we know if we end up going pass the maxWidth.
174 int rightAdj = srcBounds.fRight % granularity.width;
175 if (rightAdj != 0) {
176 rightAdj = granularity.width - rightAdj;
177 }
178 dstBounds->fRight = srcBounds.fRight + rightAdj;
179 if (dstBounds->fRight > maxWidth) {
180 dstBounds->fRight = maxWidth;
181 dstBounds->fLeft = 0;
182 } else {
183 dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
184 }
185 } else {
186 dstBounds->fLeft = srcBounds.fLeft;
187 dstBounds->fRight = srcBounds.fRight;
188 }
189
190 // Adjust height
191 if ((0 != granularity.height && 1 != granularity.height)) {
192 // Start with the bottom side of rect so we know if we end up going pass the maxHeight.
193 int bottomAdj = srcBounds.fBottom % granularity.height;
194 if (bottomAdj != 0) {
195 bottomAdj = granularity.height - bottomAdj;
196 }
197 dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
198 if (dstBounds->fBottom > maxHeight) {
199 dstBounds->fBottom = maxHeight;
200 dstBounds->fTop = 0;
201 } else {
202 dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
203 }
204 } else {
205 dstBounds->fTop = srcBounds.fTop;
206 dstBounds->fBottom = srcBounds.fBottom;
207 }
208 }
209
// Begins fCurrentRenderPass on the GPU: transitions attachment layouts,
// computes the granularity-aligned render area, and records the begin. On
// failure the render pass is dropped (fCurrentRenderPass = nullptr) and any
// secondary command buffer is ended; returns false in that case.
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    // The secondary CB contents can only run in the first subpass when we are
    // not doing a load-from-resolve (whose load work is recorded first).
    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    // Some devices require loading the full image (not just fBounds) when
    // using discardable MSAA with a resolve attachment.
    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    // Convert the bounds into Vulkan's top-left-origin device space.
    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}
256
// Finds the render pass matching the requested load/store ops, optionally
// allocates a secondary command buffer (when the caps prefer them), and
// begins the render pass with the requested clear color. Returns false on
// failure, leaving fCurrentRenderPass null.
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    // The framebuffer identifies its compatible render pass family; look up
    // the member of that family with our exact load/store ops.
    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}
300
initWrapped()301 bool GrVkOpsRenderPass::initWrapped() {
302 SkASSERT(fFramebuffer->isExternal());
303 fCurrentRenderPass = fFramebuffer->externalRenderPass();
304 SkASSERT(fCurrentRenderPass);
305 fCurrentRenderPass->ref();
306
307 fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
308 if (!fCurrentSecondaryCommandBuffer) {
309 return false;
310 }
311 return true;
312 }
313
~GrVkOpsRenderPass()314 GrVkOpsRenderPass::~GrVkOpsRenderPass() {
315 this->reset();
316 }
317
gpu()318 GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }
319
currentCommandBuffer()320 GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
321 if (fCurrentSecondaryCommandBuffer) {
322 return fCurrentSecondaryCommandBuffer.get();
323 }
324 // We checked this when we setup the GrVkOpsRenderPass and it should not have changed while we
325 // are still using this object.
326 SkASSERT(fGpu->currentCommandBuffer());
327 return fGpu->currentCommandBuffer();
328 }
329
// Copies the resolve attachment's contents into the MSAA color attachment in
// the initial load subpass, then advances to the main subpass.
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    // Advance to the main subpass ("nexSubpass" [sic] is the actual name on
    // GrVkCommandBuffer).
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
344
// Hands this pass's recorded work to the GPU and ends the render pass. No-op
// if set() never succeeded (no render target) or the render pass was dropped
// due to device loss.
void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass the ownership of the GrVkSecondaryCommandBuffer to the external framebuffer
        // since its lifetime matches the lifetime we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}
369
// One-time setup for this render pass object: validates the primary command
// buffer, transitions all sampled textures to SHADER_READ_ONLY_OPTIMAL,
// records the framebuffer/bounds/flags, and dispatches to either the wrapped
// or the normal init path. Returns false if the pass cannot be started.
bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const TArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check to make sure the GrVkGpu has a valid current command buffer instead of each time we
    // access it. If the command buffer is valid here should be valid throughout the use of the
    // render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    // Transition every instantiated sampled texture into the shader-read
    // layout now, before the render pass begins.
    for (int i = 0; i < sampledProxies.size(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    // Bounds must fit within the color attachment (or be empty).
    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}
425
// Returns this object to its unset state so it can be reused: recycles any
// unsubmitted secondary command buffer, unrefs the render pass, and clears
// the framebuffer, bounds-related flags, and debug state.
void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}
451
wrapsSecondaryCommandBuffer() const452 bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
453 return fFramebuffer->isExternal();
454 }
455
456 ////////////////////////////////////////////////////////////////////////////////
457
// Clears the stencil attachment's clip state inside the active render pass
// using vkCmdClearAttachments. Note: the entire stencil value is cleared,
// not just the clip bit (the caller contract permits this).
void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        // Set only the most-significant stencil bit.
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary (bottom-left origin targets must be converted to
    // Vulkan's top-left-origin device space).
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}
510
onClear(const GrScissorState & scissor,std::array<float,4> color)511 void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
512 if (!fCurrentRenderPass) {
513 SkASSERT(fGpu->isDeviceLost());
514 return;
515 }
516
517 VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};
518
519 // If we end up in a situation where we are calling clear without a scissior then in general it
520 // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
521 // there are situations where higher up we couldn't discard the previous ops and set a clear
522 // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
523 // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
524 // can then reenable this assert assuming we can't get messed up by a waitOp.
525 //SkASSERT(!fCurrentCBIsEmpty || scissor);
526
527 auto dimensions = fFramebuffer->colorAttachment()->dimensions();
528 // We always do a sub rect clear with clearAttachments since we are inside a render pass
529 VkClearRect clearRect;
530 // Flip rect if necessary
531 SkIRect vkRect;
532 if (!scissor.enabled()) {
533 vkRect.setSize(dimensions);
534 } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
535 vkRect = scissor.rect();
536 } else {
537 vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
538 scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
539 }
540 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
541 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
542 clearRect.baseArrayLayer = 0;
543 clearRect.layerCount = 1;
544
545 uint32_t colorIndex;
546 SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));
547
548 VkClearAttachment attachment;
549 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
550 attachment.colorAttachment = colorIndex;
551 attachment.clearValue.color = vkColor;
552
553 this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
554 fCurrentCBIsEmpty = false;
555 }
556
557 ////////////////////////////////////////////////////////////////////////////////
558
// Starts a brand-new render pass over the same target after the previous one
// was ended mid-stream (e.g. for an inline upload). All load ops become LOAD
// (or a load-from-resolve) so earlier rendering is preserved. On any failure
// fCurrentRenderPass is left null and later calls become no-ops.
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        // The MSAA color contents are reloaded from the resolve attachment
        // instead, so the color attachment's own load/store don't matter.
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    // Fetch a framebuffer matching the new pass's attachment configuration.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    // All load ops are LOAD (or load-from-resolve), so the clear color value
    // is never consumed; zero it out.
    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}
625
// Performs a deferred texture upload in the middle of this pass. Uploads
// cannot happen inside an active render pass, so we end the current pass,
// record the upload on the primary command buffer, and then start a new pass
// (with load ops preserving prior rendering) via addAdditionalRenderPass().
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}
643
644 ////////////////////////////////////////////////////////////////////////////////
645
onEnd()646 void GrVkOpsRenderPass::onEnd() {
647 if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
648 fCurrentSecondaryCommandBuffer->end(fGpu);
649 }
650 }
651
// Finds or creates the pipeline state for 'programInfo', binds it, pushes its
// uniforms, and sets the dynamic scissor/viewport/blend-constant state.
// Returns false if a compatible pipeline state could not be created or the
// uniforms could not be bound.
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    // Remember the draw's bounds (clipped to the pass bounds); subsequent
    // scissor rects are clipped against this in onSetScissorRect().
    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}
700
onSetScissorRect(const SkIRect & scissor)701 void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
702 SkIRect combinedScissorRect;
703 if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
704 combinedScissorRect = SkIRect::MakeEmpty();
705 }
706 GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
707 fFramebuffer->colorAttachment()->dimensions(),
708 fOrigin, combinedScissorRect);
709 }
710
711 #ifdef SK_DEBUG
check_sampled_texture(GrTexture * tex,GrAttachment * colorAttachment,GrVkGpu * gpu)712 void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
713 SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
714 auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
715 SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
716 }
717 #endif
718
// Binds all textures used by the geometry processor and pipeline and, when
// blending reads the dst via an input attachment, binds the color
// attachment's input descriptor set as well. Returns false on bind failure.
bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    // Debug-only: verify every sampled texture's protected-ness and layout.
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}
750
// Binds the (optional) index, instance, and vertex buffers for subsequent
// draws. All buffers must be unmapped GPU buffers. Primitive restart is not
// supported by this backend.
void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (vertexBuffer) {
        // Debug-only checks: must be a GPU buffer and not currently mapped.
        SkDEBUGCODE(auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get()));
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (instanceBuffer) {
        SkDEBUGCODE(auto* gpuInstanceBuffer =
                static_cast<const GrGpuBuffer*>(instanceBuffer.get()));
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (indexBuffer) {
        SkDEBUGCODE(auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get()));
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}
794
onDrawInstanced(int instanceCount,int baseInstance,int vertexCount,int baseVertex)795 void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
796 int baseInstance,
797 int vertexCount, int baseVertex) {
798 if (!fCurrentRenderPass) {
799 SkASSERT(fGpu->isDeviceLost());
800 return;
801 }
802 SkASSERT(fCurrentPipelineState);
803 this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
804 fGpu->stats()->incNumDraws();
805 fCurrentCBIsEmpty = false;
806 }
807
onDrawIndexedInstanced(int indexCount,int baseIndex,int instanceCount,int baseInstance,int baseVertex)808 void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
809 int baseInstance, int baseVertex) {
810 if (!fCurrentRenderPass) {
811 SkASSERT(fGpu->isDeviceLost());
812 return;
813 }
814 SkASSERT(fCurrentPipelineState);
815 this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
816 baseIndex, baseVertex, baseInstance);
817 fGpu->stats()->incNumDraws();
818 fCurrentCBIsEmpty = false;
819 }
820
onDrawIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)821 void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
822 int drawCount) {
823 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
824 if (!fCurrentRenderPass) {
825 SkASSERT(fGpu->isDeviceLost());
826 return;
827 }
828 const GrVkCaps& caps = fGpu->vkCaps();
829 SkASSERT(caps.nativeDrawIndirectSupport());
830 SkASSERT(fCurrentPipelineState);
831
832 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
833 uint32_t remainingDraws = drawCount;
834 const size_t stride = sizeof(GrDrawIndirectCommand);
835 while (remainingDraws >= 1) {
836 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
837 this->currentCommandBuffer()->drawIndirect(
838 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
839 remainingDraws -= currDrawCount;
840 offset += stride * currDrawCount;
841 fGpu->stats()->incNumDraws();
842 }
843 fCurrentCBIsEmpty = false;
844 }
845
onDrawIndexedIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)846 void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
847 int drawCount) {
848 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
849 if (!fCurrentRenderPass) {
850 SkASSERT(fGpu->isDeviceLost());
851 return;
852 }
853 const GrVkCaps& caps = fGpu->vkCaps();
854 SkASSERT(caps.nativeDrawIndirectSupport());
855 SkASSERT(fCurrentPipelineState);
856 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
857 uint32_t remainingDraws = drawCount;
858 const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
859 while (remainingDraws >= 1) {
860 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
861 this->currentCommandBuffer()->drawIndexedIndirect(
862 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
863 remainingDraws -= currDrawCount;
864 offset += stride * currDrawCount;
865 fGpu->stats()->incNumDraws();
866 }
867 fCurrentCBIsEmpty = false;
868 }
869
870 ////////////////////////////////////////////////////////////////////////////////
871
void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    // Lets a client-provided SkDrawable record Vulkan commands directly into a secondary
    // command buffer that executes inside our current render pass.
    if (!fCurrentRenderPass) {
        // The render pass was abandoned (device lost); drop the drawable.
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // Draw bounds handed to the drawable. Extent is {0,0} here — presumably meaning
    // "no/unknown bounds"; TODO(review): confirm against the fDrawBounds contract.
    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    // The drawable requires a secondary command buffer. If the current pass isn't using one,
    // end it and start an additional render pass that records into a secondary buffer.
    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    // Package everything the external drawable needs to record compatible commands.
    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;  // aliases the local above; only valid during draw() below
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fFromSwapchainOrAndroidWindow =
        fFramebuffer->colorAttachment()->vkImageInfo().fPartOfSwapchainOrAndroidWindow;
#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    // Hand ownership to the GPU so the drawable stays alive until its commands finish.
    fGpu->addDrawable(std::move(drawable));
}
914