/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/DrawContext.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkPixmap.h"
#include "include/private/SkColorData.h"

#include "include/gpu/graphite/Context.h"
#include "include/gpu/graphite/Recorder.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/Buffer.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ComputePathAtlas.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/DrawList.h"
#include "src/gpu/graphite/DrawPass.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RasterPathAtlas.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/TextureProxy.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"
#include "src/gpu/graphite/geom/BoundsManager.h"
#include "src/gpu/graphite/geom/Geometry.h"
#include "src/gpu/graphite/task/ComputeTask.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/DrawTask.h"
#include "src/gpu/graphite/task/RenderPassTask.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/gpu/graphite/text/TextAtlasManager.h"

namespace skgpu::graphite {

namespace {

// Discarding content on floating point textures can leave NaNs as the prior color for a pixel,
// in which case hardware blending (when enabled) will fail even if the src, dst coefficients
// and coverage would produce the unmodified src value.
bool discard_op_should_use_clear(SkColorType ct) {
    switch (ct) {
        case kRGBA_F16Norm_SkColorType:
        case kRGBA_F16_SkColorType:
        case kRGBA_F32_SkColorType:
        case kA16_float_SkColorType:
        case kR16G16_float_SkColorType:
            return true;
        default:
            return false;
    }
}

} // anonymous namespace

sk_sp<DrawContext> DrawContext::Make(const Caps* caps,
                                     sk_sp<TextureProxy> target,
                                     SkISize deviceSize,
                                     const SkColorInfo& colorInfo,
                                     const SkSurfaceProps& props) {
    if (!target) {
        return nullptr;
    }
    // We don't render to unknown or unpremul alpha types
    if (colorInfo.alphaType() == kUnknown_SkAlphaType ||
        colorInfo.alphaType() == kUnpremul_SkAlphaType) {
        return nullptr;
    }
    if (!caps->isRenderable(target->textureInfo())) {
        return nullptr;
    }

    // Accept an approximate-fit texture, but make sure it's at least as large as the device's
    // logical size.
    // TODO: validate that the color type and alpha type are compatible with the target's info
    SkASSERT(target->isFullyLazy() || (target->dimensions().width() >= deviceSize.width() &&
                                       target->dimensions().height() >= deviceSize.height()));
    SkImageInfo imageInfo = SkImageInfo::Make(deviceSize, colorInfo);
    return sk_sp<DrawContext>(new DrawContext(caps, std::move(target), imageInfo, props));
}

DrawContext::DrawContext(const Caps* caps,
                         sk_sp<TextureProxy> target,
                         const SkImageInfo& ii,
                         const SkSurfaceProps& props)
        : fTarget(std::move(target))
        , fImageInfo(ii)
        , fSurfaceProps(props)
        , fCurrentDrawTask(sk_make_sp<DrawTask>(fTarget))
        , fPendingDraws(std::make_unique<DrawList>())
        , fPendingUploads(std::make_unique<UploadList>()) {
    if (!caps->isTexturable(fTarget->textureInfo())) {
        fReadView = {}; // Presumably this DrawContext is rendering into a swap chain
    } else {
        Swizzle swizzle = caps->getReadSwizzle(ii.colorType(), fTarget->textureInfo());
        fReadView = {fTarget, swizzle};
    }
    // TBD - Will probably want DrawLists (and their internal commands) to come from an arena
    // that the DC manages.
}

DrawContext::~DrawContext() = default;

void DrawContext::clear(const SkColor4f& clearColor) {
    this->discard();

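    // Remember the requested clear color; it is tracked premultiplied.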
    fPendingLoadOp = LoadOp::kClear;
    SkPMColor4f pmColor = clearColor.premul();
    fPendingClearColor = pmColor.array();
}

void DrawContext::discard() {
    // Non-loading operations on a fully lazy target can corrupt data beyond the DrawContext's
    // region, so they should be avoided.
    SkASSERT(!fTarget->isFullyLazy());

    // A fullscreen clear or discard will overwrite anything that came before, so clear the
    // DrawList.
    // NOTE: Eventually the current DrawTask should be reset as well, once there are no longer
    // implicit dependencies on atlas tasks between DrawContexts. When that's resolved, the only
    // tasks in the current DrawTask will be those that directly affect the target, and those
    // become irrelevant once the clear op overwrites it. For now, preserve the previous tasks
    // since they might include atlas uploads that are not explicitly shared between DrawContexts.
    if (fPendingDraws->renderStepCount() > 0) {
        fPendingDraws = std::make_unique<DrawList>();
    }
    if (fComputePathAtlas) {
        fComputePathAtlas->reset();
    }

    if (discard_op_should_use_clear(fImageInfo.colorType())) {
        // In theory the clear color shouldn't matter since a discardable state should be fully
        // overwritten by later draws, but if a previous call to clear() had injected bad data,
        // the discard should not inherit it.
        fPendingClearColor = {0.f, 0.f, 0.f, 0.f};
        fPendingLoadOp = LoadOp::kClear;
    } else {
        fPendingLoadOp = LoadOp::kDiscard;
    }
}

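// Draws are only accumulated into the pending DrawList here; no GPU work is generated until
// flush() converts the list into a DrawPass.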
void DrawContext::recordDraw(const Renderer* renderer,
                             const Transform& localToDevice,
                             const Geometry& geometry,
                             const Clip& clip,
                             DrawOrder ordering,
                             const PaintParams* paint,
                             const StrokeStyle* stroke) {
    SkASSERT(SkIRect::MakeSize(this->imageInfo().dimensions()).contains(clip.scissor()));
    fPendingDraws->recordDraw(renderer, localToDevice, geometry, clip, ordering, paint, stroke);
}

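// Appends an upload to the pending UploadList; the return value reports whether the upload was
// successfully recorded.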
bool DrawContext::recordUpload(Recorder* recorder,
                               sk_sp<TextureProxy> targetProxy,
                               const SkColorInfo& srcColorInfo,
                               const SkColorInfo& dstColorInfo,
                               const std::vector<MipLevel>& levels,
                               const SkIRect& dstRect,
                               std::unique_ptr<ConditionalUploadContext> condContext) {
    // Our caller should have clipped to the bounds of the surface already.
    SkASSERT(targetProxy->isFullyLazy() ||
             SkIRect::MakeSize(targetProxy->dimensions()).contains(dstRect));
    return fPendingUploads->recordUpload(recorder,
                                         std::move(targetProxy),
                                         srcColorInfo,
                                         dstColorInfo,
                                         levels,
                                         dstRect,
                                         std::move(condContext));
}

void DrawContext::recordDependency(sk_sp<Task> task) {
    SkASSERT(task);
    // Adding `task` to the current DrawTask directly means that it will execute after any previous
    // dependent tasks and after any previous calls to flush(), but everything else that's being
    // collected on the DrawContext will execute after `task` once the next flush() is performed.
    fCurrentDrawTask->addTask(std::move(task));
}

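// Lazily creates the compute path atlas on first use. The returned pointer can be null if the
// AtlasProvider cannot create a compute atlas (see the note at the end of flush()).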
PathAtlas* DrawContext::getComputePathAtlas(Recorder* recorder) {
    if (!fComputePathAtlas) {
        fComputePathAtlas = recorder->priv().atlasProvider()->createComputePathAtlas(recorder);
    }
    return fComputePathAtlas.get();
}

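// Converts all pending work into tasks appended to the current DrawTask, in dependency order:
// uploads first, then compute dispatches for atlas rendering, and finally a RenderPassTask for
// the accumulated draws (preceded by a copy task when a dst copy is required).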
void DrawContext::flush(Recorder* recorder) {
    if (fPendingUploads->size() > 0) {
        TRACE_EVENT_INSTANT1("skia.gpu", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                             "# uploads", fPendingUploads->size());
        fCurrentDrawTask->addTask(UploadTask::Make(fPendingUploads.get()));
        // The UploadTask steals the collected upload instances, automatically resetting this list
        SkASSERT(fPendingUploads->size() == 0);
    }

    // Generate compute dispatches that render into the atlas texture used by pending draws.
    // TODO: Once compute atlas caching is implemented, DrawContext might not hold on to this
    // atlas, at which point a recordDispatch() could be added that stores a pending dispatch
    // list, much like how uploads are handled. In that case, Device would be responsible for
    // triggering the recording of dispatches, but that may happen naturally in
    // AtlasProvider::recordUploads().
    if (fComputePathAtlas) {
        ComputeTask::DispatchGroupList dispatches;
        if (fComputePathAtlas->recordDispatches(recorder, &dispatches)) {
            // For now this check is valid as all coverage mask draws involve dispatches
            SkASSERT(fPendingDraws->hasCoverageMaskDraws());

            fCurrentDrawTask->addTask(ComputeTask::Make(std::move(dispatches)));
        } // else no pending compute work needed to be recorded

        fComputePathAtlas->reset();
    } // else platform doesn't support compute or atlas was never initialized.

    if (fPendingDraws->renderStepCount() == 0 && fPendingLoadOp != LoadOp::kClear) {
        // Nothing will be rasterized to the target that warrants a RenderPassTask, but we preserve
        // any added uploads or compute tasks since those could also affect the target w/o
        // rasterizing anything directly.
        return;
    }

    // Convert the pending draws and load/store ops into a DrawPass that will be executed after
    // the collected uploads and compute dispatches. Save the bounds required for a dst copy so
    // that a copy task of sufficient size can be inserted.
    // TODO: At this point, there's only ever one DrawPass in a RenderPassTask to a target. When
    // subpasses are implemented, they will either be collected alongside fPendingDraws or added
    // to the RenderPassTask separately.
    SkIRect dstCopyPixelBounds = fPendingDraws->dstCopyBounds().makeRoundOut().asSkIRect();
    std::unique_ptr<DrawPass> pass = DrawPass::Make(recorder,
                                                    std::move(fPendingDraws),
                                                    fTarget,
                                                    this->imageInfo(),
                                                    std::make_pair(fPendingLoadOp, fPendingStoreOp),
                                                    fPendingClearColor);
    fPendingDraws = std::make_unique<DrawList>();
    // Now that there is content drawn to the target, that content must be loaded on any subsequent
    // render pass.
    fPendingLoadOp = LoadOp::kLoad;
    fPendingStoreOp = StoreOp::kStore;

    if (pass) {
        SkASSERT(fTarget.get() == pass->target());

        sk_sp<TextureProxy> dstCopy;
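        // A non-empty dst-copy region means at least one draw in the pass reads the destination
        // (e.g. for blending performed in the shader), so snapshot that region of the target
        // before the render pass executes.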
        if (!dstCopyPixelBounds.isEmpty()) {
            TRACE_EVENT_INSTANT0("skia.gpu", "DrawPass requires dst copy",
                                 TRACE_EVENT_SCOPE_THREAD);

            // TODO: Right now this assert is ensuring that the dstCopy will be texturable since it
            // uses the same texture info as fTarget. Ideally, if fTarget were not texturable but
            // still readable, we would perform a fallback to a compatible texturable info. We also
            // should decide whether or not a copy-as-draw fallback is necessary here too. All of
            // this is handled inside Image::Copy() except we would need it to expose the task in
            // order to link it correctly.
            SkASSERT(recorder->priv().caps()->isTexturable(fTarget->textureInfo()));
            dstCopy = TextureProxy::Make(recorder->priv().caps(),
                                         recorder->priv().resourceProvider(),
                                         dstCopyPixelBounds.size(),
                                         fTarget->textureInfo(),
                                         "DstCopyTexture",
                                         skgpu::Budgeted::kYes);
            SkASSERT(dstCopy);

            // Add the copy task to initialize dstCopy before the render pass task.
            fCurrentDrawTask->addTask(CopyTextureToTextureTask::Make(
                    fTarget, dstCopyPixelBounds, dstCopy, /*dstPoint=*/{0, 0}));
        }

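        // Describe the render pass using the load/store ops resolved by the DrawPass and the
        // write swizzle that maps the color type onto the target's texture format.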
        const Caps* caps = recorder->priv().caps();
        auto [loadOp, storeOp] = pass->ops();
        auto writeSwizzle = caps->getWriteSwizzle(this->colorInfo().colorType(),
                                                  fTarget->textureInfo());

        RenderPassDesc desc = RenderPassDesc::Make(caps, fTarget->textureInfo(), loadOp, storeOp,
                                                   pass->depthStencilFlags(),
                                                   pass->clearColor(),
                                                   pass->requiresMSAA(),
                                                   writeSwizzle);

        RenderPassTask::DrawPassList passes;
        passes.emplace_back(std::move(pass));
        fCurrentDrawTask->addTask(RenderPassTask::Make(std::move(passes), desc, fTarget,
                                                       std::move(dstCopy), dstCopyPixelBounds));
    }
    // else pass creation failed, DrawPass will have logged why. Don't discard the previously
    // accumulated tasks, however, since they may represent operations on an atlas that other
    // DrawContexts now implicitly depend on.
}

sk_sp<Task> DrawContext::snapDrawTask(Recorder* recorder) {
    // If flush() was explicitly called earlier and no new work was recorded, this call to flush()
    // is a no-op and shouldn't hurt performance.
    this->flush(recorder);

    if (!fCurrentDrawTask->hasTasks()) {
        return nullptr;
    }

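    // Hand off the accumulated DrawTask and start a fresh one for any subsequent recording.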
    sk_sp<Task> snappedTask = std::move(fCurrentDrawTask);
    fCurrentDrawTask = sk_make_sp<DrawTask>(fTarget);
    return snappedTask;
}

} // namespace skgpu::graphite