1 /*
2 * Copyright 2021 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/Device.h"
9
10 #include "include/gpu/graphite/Recorder.h"
11 #include "include/gpu/graphite/Recording.h"
12 #include "include/gpu/graphite/Surface.h"
13 #include "src/gpu/AtlasTypes.h"
14 #include "src/gpu/BlurUtils.h"
15 #include "src/gpu/SkBackingFit.h"
16 #include "src/gpu/graphite/AtlasProvider.h"
17 #include "src/gpu/graphite/Buffer.h"
18 #include "src/gpu/graphite/Caps.h"
19 #include "src/gpu/graphite/CommandBuffer.h"
20 #include "src/gpu/graphite/ContextOptionsPriv.h"
21 #include "src/gpu/graphite/ContextPriv.h"
22 #include "src/gpu/graphite/ContextUtils.h"
23 #include "src/gpu/graphite/DrawContext.h"
24 #include "src/gpu/graphite/DrawList.h"
25 #include "src/gpu/graphite/DrawParams.h"
26 #include "src/gpu/graphite/Image_Graphite.h"
27 #include "src/gpu/graphite/Log.h"
28 #include "src/gpu/graphite/PathAtlas.h"
29 #include "src/gpu/graphite/RasterPathAtlas.h"
30 #include "src/gpu/graphite/RecorderPriv.h"
31 #include "src/gpu/graphite/Renderer.h"
32 #include "src/gpu/graphite/RendererProvider.h"
33 #include "src/gpu/graphite/ResourceTypes.h"
34 #include "src/gpu/graphite/SharedContext.h"
35 #include "src/gpu/graphite/SpecialImage_Graphite.h"
36 #include "src/gpu/graphite/Surface_Graphite.h"
37 #include "src/gpu/graphite/TextureProxy.h"
38 #include "src/gpu/graphite/TextureUtils.h"
39 #include "src/gpu/graphite/geom/BoundsManager.h"
40 #include "src/gpu/graphite/geom/Geometry.h"
41 #include "src/gpu/graphite/geom/IntersectionTree.h"
42 #include "src/gpu/graphite/geom/Shape.h"
43 #include "src/gpu/graphite/geom/Transform_graphite.h"
44 #include "src/gpu/graphite/text/TextAtlasManager.h"
45
46 #include "include/core/SkColorSpace.h"
47 #include "include/core/SkPath.h"
48 #include "include/core/SkPathEffect.h"
49 #include "include/core/SkStrokeRec.h"
50
51 #include "src/core/SkBlenderBase.h"
52 #include "src/core/SkBlurMaskFilterImpl.h"
53 #include "src/core/SkColorSpacePriv.h"
54 #include "src/core/SkConvertPixels.h"
55 #include "src/core/SkImageFilterTypes.h"
56 #include "src/core/SkImageInfoPriv.h"
57 #include "src/core/SkImagePriv.h"
58 #include "src/core/SkMatrixPriv.h"
59 #include "src/core/SkPaintPriv.h"
60 #include "src/core/SkRRectPriv.h"
61 #include "src/core/SkSpecialImage.h"
62 #include "src/core/SkStrikeCache.h"
63 #include "src/core/SkTraceEvent.h"
64 #include "src/core/SkVerticesPriv.h"
65 #include "src/gpu/TiledTextureUtils.h"
66 #include "src/text/GlyphRun.h"
67 #include "src/text/gpu/GlyphVector.h"
68 #include "src/text/gpu/SlugImpl.h"
69 #include "src/text/gpu/SubRunContainer.h"
70 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
71 #include "src/text/gpu/VertexFiller.h"
72
73 #include <functional>
74 #include <tuple>
75 #include <unordered_map>
76 #include <vector>
77
78 using RescaleGamma = SkImage::RescaleGamma;
79 using RescaleMode = SkImage::RescaleMode;
80 using ReadPixelsCallback = SkImage::ReadPixelsCallback;
81 using ReadPixelsContext = SkImage::ReadPixelsContext;
82
#if defined(GPU_TEST_UTILS)
// Test-only override consulted when sizing/tiling image draws; presumably 0 means "no override,
// use the Caps-reported max texture size" — confirm against TextureUtils/tiling callers.
int gOverrideMaxTextureSizeGraphite = 0;
// Allows tests to check how many tiles were drawn on the most recent call to
// Device::drawAsTiledImageRect. This is an atomic because we can write to it from
// multiple threads during "normal" operations. However, the tests that actually
// read from it are done single-threaded.
std::atomic<int> gNumTilesDrawnGraphite{0};
#endif
91
92 namespace skgpu::graphite {
93
94 #define ASSERT_SINGLE_OWNER SkASSERT(fRecorder); SKGPU_ASSERT_SINGLE_OWNER(fRecorder->singleOwner())
95
96 namespace {
97
DefaultFillStyle()98 const SkStrokeRec& DefaultFillStyle() {
99 static const SkStrokeRec kFillStyle(SkStrokeRec::kFill_InitStyle);
100 return kFillStyle;
101 }
102
// Returns true if blending with `blender` must read the destination pixel value. A null blender
// is treated as the default kSrcOver blend.
bool blender_depends_on_dst(const SkBlender* blender, bool srcIsTransparent) {
    std::optional<SkBlendMode> bm = blender ? as_BB(blender)->asBlendMode() : SkBlendMode::kSrcOver;
    if (!bm.has_value()) {
        // Not reducible to a fixed SkBlendMode (e.g. a runtime blender): conservatively assume
        // it reads dst.
        return true;
    }
    if (bm.value() == SkBlendMode::kSrc || bm.value() == SkBlendMode::kClear) {
        // src and clear blending never depends on dst
        return false;
    }
    if (bm.value() == SkBlendMode::kSrcOver) {
        // src-over depends on dst if src is transparent (a != 1)
        return srcIsTransparent;
    }
    // TODO: Are there other modes that don't depend on dst that can be trivially detected?
    return true;
}
119
paint_depends_on_dst(SkColor4f color,const SkShader * shader,const SkColorFilter * colorFilter,const SkBlender * finalBlender,const SkBlender * primitiveBlender)120 bool paint_depends_on_dst(SkColor4f color,
121 const SkShader* shader,
122 const SkColorFilter* colorFilter,
123 const SkBlender* finalBlender,
124 const SkBlender* primitiveBlender) {
125 const bool srcIsTransparent = !color.isOpaque() || (shader && !shader->isOpaque()) ||
126 (colorFilter && !colorFilter->isAlphaUnchanged());
127
128 if (primitiveBlender && blender_depends_on_dst(primitiveBlender, srcIsTransparent)) {
129 return true;
130 }
131
132 return blender_depends_on_dst(finalBlender, srcIsTransparent);
133 }
134
// Convenience overload that forwards the relevant fields of a PaintParams.
bool paint_depends_on_dst(const PaintParams& paintParams) {
    return paint_depends_on_dst(paintParams.color(),
                                paintParams.shader(),
                                paintParams.colorFilter(),
                                paintParams.finalBlender(),
                                paintParams.primitiveBlender());
}
142
// Convenience overload for SkPaint; paints at this level never carry a primitive blender.
bool paint_depends_on_dst(const SkPaint& paint) {
    // CAUTION: getMaskFilter is intentionally ignored here.
    SkASSERT(!paint.getImageFilter()); // no paints in SkDevice should have an image filter
    return paint_depends_on_dst(paint.getColor4f(),
                                paint.getShader(),
                                paint.getColorFilter(),
                                paint.getBlender(),
                                /*primitiveBlender=*/nullptr);
}
152
153 /** If the paint can be reduced to a solid flood-fill, determine the correct color to fill with. */
extract_paint_color(const SkPaint & paint,const SkColorInfo & dstColorInfo)154 std::optional<SkColor4f> extract_paint_color(const SkPaint& paint,
155 const SkColorInfo& dstColorInfo) {
156 SkASSERT(!paint_depends_on_dst(paint));
157 if (paint.getShader()) {
158 return std::nullopt;
159 }
160
161 SkColor4f dstPaintColor = PaintParams::Color4fPrepForDst(paint.getColor4f(), dstColorInfo);
162
163 if (SkColorFilter* filter = paint.getColorFilter()) {
164 SkColorSpace* dstCS = dstColorInfo.colorSpace();
165 return filter->filterColor4f(dstPaintColor, dstCS, dstCS);
166 }
167 return dstPaintColor;
168 }
169
170 // Returns a local rect that has been adjusted such that when it's rasterized with `localToDevice`
171 // it will be pixel aligned. If this adjustment is not possible (due to transform type or precision)
172 // then this returns the original local rect unmodified.
173 //
174 // If `strokeWidth` is null, it's assumed to be a filled rectangle. If it's not null, on input it
175 // should hold the stroke width (or 0 for a hairline). After this returns, the stroke width may
176 // have been adjusted so that outer and inner stroked edges are pixel aligned (in which case the
177 // underlying rectangle geometry probably won't be pixel aligned).
178 //
179 // A best effort is made to align the stroke edges when there's a non-uniform scale factor that
180 // prevents exactly aligning both X and Y axes.
Rect snap_rect_to_pixels(const Transform& localToDevice,
                         const Rect& rect,
                         float* strokeWidth=nullptr) {
    if (localToDevice.type() > Transform::Type::kRectStaysRect) {
        // Snapping is only well-defined for transforms that keep rects axis-aligned.
        return rect;
    }

    // NOTE: the original `if (!strokeWidth) ... else if (strokeWidth)` had a redundant second
    // condition; the three cases are flattened into one if/else-if/else chain.
    Rect snappedDeviceRect;
    if (!strokeWidth) {
        // Just a fill, use round() to emulate non-AA rasterization (vs. roundOut() to get the
        // covering bounds). This matches how ClipStack treats clipRects with PixelSnapping::kYes.
        snappedDeviceRect = localToDevice.mapRect(rect).round();
    } else if (*strokeWidth == 0.f) {
        // Hairline case needs to be outset by 1/2 device pixels *before* rounding, and then
        // inset by 1/2px to get the base shape while leaving the stroke width as 0.
        snappedDeviceRect = localToDevice.mapRect(rect);
        snappedDeviceRect.outset(0.5f).round().inset(0.5f);
    } else {
        // For regular strokes, outset by the stroke radius *before* mapping to device space,
        // and then round.
        snappedDeviceRect = localToDevice.mapRect(rect.makeOutset(0.5f*(*strokeWidth))).round();

        // devScales.x() holds scale factor affecting device-space X axis (so max of |m00| or
        // |m01|) and y() holds the device Y axis scale (max of |m10| or |m11|).
        skvx::float2 devScales = max(abs(skvx::float2(localToDevice.matrix().rc(0,0),
                                                      localToDevice.matrix().rc(1,0))),
                                     abs(skvx::float2(localToDevice.matrix().rc(0,1),
                                                      localToDevice.matrix().rc(1,1))));
        skvx::float2 devStrokeWidth = max(round(*strokeWidth * devScales), 1.f);

        // Prioritize the axis that has the largest device-space radius (any error from a
        // non-uniform scale factor will go into the inner edge of the opposite axis).
        // During animating scale factors, preserving the large axis leads to better behavior.
        if (devStrokeWidth.x() > devStrokeWidth.y()) {
            *strokeWidth = devStrokeWidth.x() / devScales.x();
        } else {
            *strokeWidth = devStrokeWidth.y() / devScales.y();
        }

        snappedDeviceRect.inset(0.5f * devScales * (*strokeWidth));
    }

    // Map back to local space so that it can be drawn with appropriate coord interpolation.
    Rect snappedLocalRect = localToDevice.inverseMapRect(snappedDeviceRect);
    // If the transform has an extreme scale factor or large translation, it's possible for floating
    // point precision to round `snappedLocalRect` in such a way that re-transforming it by the
    // local-to-device matrix no longer matches the expected device bounds.
    if (snappedDeviceRect.nearlyEquals(localToDevice.mapRect(snappedLocalRect))) {
        return snappedLocalRect;
    } else {
        // In this case we will just return the original geometry and the pixels will show
        // fractional coverage.
        return rect;
    }
}
238
239 // If possible, snaps `dstRect` such that its device-space transformation lands on pixel bounds,
240 // and then updates `srcRect` to match the original src-to-dst coordinate mapping.
void snap_src_and_dst_rect_to_pixels(const Transform& localToDevice,
                                     SkRect* srcRect,
                                     SkRect* dstRect) {
    if (localToDevice.type() > Transform::Type::kRectStaysRect) {
        // Snapping is only well-defined for transforms that keep rects axis-aligned.
        return;
    }

    // Assume snapping will succeed and always update 'src' to match; in the event snapping
    // returns the original dst rect, then the recalculated src rect is a no-op.
    SkMatrix dstToSrc = SkMatrix::RectToRect(*dstRect, *srcRect);
    *dstRect = snap_rect_to_pixels(localToDevice, *dstRect).asSkRect();
    *srcRect = dstToSrc.mapRect(*dstRect);
}
254
255 // Returns the inner bounds of `geometry` that is known to have full coverage. This does not worry
256 // about identifying draws that are equivalent pixel aligned and thus entirely full coverage, as
257 // that should have been caught earlier and used a coverage-less renderer from the beginning.
258 //
259 // An empty Rect is returned if there is no available inner bounds, or if it's not worth performing.
Rect get_inner_bounds(const Geometry& geometry, const Transform& localToDevice) {
    // Shrinks `rect` by the local AA radius and rejects it if the resulting fill would cover too
    // few pixels to be worth a second draw.
    auto applyAAInset = [&](Rect rect) {
        // If the aa inset is too large, rect becomes empty and the inner bounds draw is
        // automatically skipped
        float aaInset = localToDevice.localAARadius(rect);
        rect.inset(aaInset);
        // Only add a second draw if it will have a reasonable number of covered pixels; otherwise
        // we are just adding draws to sort and pipelines to switch around.
        static constexpr float kInnerFillArea = 64*64;
        // Approximate the device-space area based on the minimum scale factor of the transform.
        float scaleFactor = sk_ieee_float_divide(1.f, aaInset);
        return scaleFactor*rect.area() >= kInnerFillArea ? rect : Rect::InfiniteInverted();
    };

    if (geometry.isEdgeAAQuad()) {
        const EdgeAAQuad& quad = geometry.edgeAAQuad();
        if (quad.isRect()) {
            return applyAAInset(quad.bounds());
        }
        // else currently we don't have a function to calculate the largest interior axis aligned
        // bounding box of a quadrilateral so skip the inner fill draw.
    } else if (geometry.isShape()) {
        const Shape& shape = geometry.shape();
        if (shape.isRect()) {
            return applyAAInset(shape.rect());
        } else if (shape.isRRect()) {
            // SkRRectPriv provides the largest axis-aligned rect inside a round rect.
            return applyAAInset(SkRRectPriv::InnerBounds(shape.rrect()));
        }
    }

    // No inner-bounds computation is available for other geometry types.
    return Rect::InfiniteInverted();
}
292
rect_to_pixelbounds(const Rect & r)293 SkIRect rect_to_pixelbounds(const Rect& r) {
294 return r.makeRoundOut().asSkIRect();
295 }
296
// Returns true if (shape, style) can be drawn by the analytic [round]rect/line Renderer without
// triggering MSAA. See the renderer-selection logic in Device for how this is consumed.
bool is_simple_shape(const Shape& shape, SkStrokeRec::Style type) {
    // We send regular filled and hairline [round] rectangles, stroked/hairline lines, and stroked
    // [r]rects with circular corners to a single Renderer that does not trigger MSAA.
    // Per-edge AA quadrilaterals also use the same Renderer but those are not "Shapes".
    // These shapes and quads may also be combined with a second non-AA inner fill. This fill step
    // is also directly used for flooding the clip.
    return (shape.isEmpty() && shape.inverted()) ||
           (!shape.inverted() && type != SkStrokeRec::kStrokeAndFill_Style &&
            (shape.isRect() ||
             (shape.isLine() && type != SkStrokeRec::kFill_Style) ||
             (shape.isRRect() && (type != SkStrokeRec::kStroke_Style ||
                                  SkRRectPriv::AllCornersCircular(shape.rrect())))));
}
310
use_compute_atlas_when_available(PathRendererStrategy strategy)311 bool use_compute_atlas_when_available(PathRendererStrategy strategy) {
312 return strategy == PathRendererStrategy::kComputeAnalyticAA ||
313 strategy == PathRendererStrategy::kComputeMSAA16 ||
314 strategy == PathRendererStrategy::kComputeMSAA8 ||
315 strategy == PathRendererStrategy::kDefault;
316 }
317
318 } // anonymous namespace
319
320 /**
321 * IntersectionTreeSet controls multiple IntersectionTrees to organize all add rectangles into
322 * disjoint sets. For a given CompressedPaintersOrder and bounds, it returns the smallest
323 * DisjointStencilIndex that guarantees the bounds are disjoint from all other draws that use the
324 * same painters order and stencil index.
325 */
326 class Device::IntersectionTreeSet {
327 public:
328 IntersectionTreeSet() = default;
329
add(CompressedPaintersOrder drawOrder,Rect rect)330 DisjointStencilIndex add(CompressedPaintersOrder drawOrder, Rect rect) {
331 auto& trees = fTrees[drawOrder];
332 DisjointStencilIndex stencil = DrawOrder::kUnassigned.next();
333 for (auto&& tree : trees) {
334 if (tree->add(rect)) {
335 return stencil;
336 }
337 stencil = stencil.next(); // advance to the next tree's index
338 }
339
340 // If here, no existing intersection tree can hold the rect so add a new one
341 IntersectionTree* newTree = this->makeTree();
342 SkAssertResult(newTree->add(rect));
343 trees.push_back(newTree);
344 return stencil;
345 }
346
reset()347 void reset() {
348 fTrees.clear();
349 fTreeStore.reset();
350 }
351
352 private:
353 struct Hash {
operator ()skgpu::graphite::Device::IntersectionTreeSet::Hash354 size_t operator()(const CompressedPaintersOrder& o) const noexcept { return o.bits(); }
355 };
356
makeTree()357 IntersectionTree* makeTree() {
358 return fTreeStore.make<IntersectionTree>();
359 }
360
361 // Each compressed painters order defines a barrier around draws so each order's set of draws
362 // are independent, even if they may intersect. Within each order, the list of trees holds the
363 // IntersectionTrees representing each disjoint set.
364 // TODO: This organization of trees is logically convenient but may need to be optimized based
365 // on real world data (e.g. how sparse is the map, how long is each vector of trees,...)
366 std::unordered_map<CompressedPaintersOrder, std::vector<IntersectionTree*>, Hash> fTrees;
367 SkSTArenaAllocWithReset<4 * sizeof(IntersectionTree)> fTreeStore;
368 };
369
Make(Recorder * recorder,const SkImageInfo & ii,skgpu::Budgeted budgeted,Mipmapped mipmapped,SkBackingFit backingFit,const SkSurfaceProps & props,LoadOp initialLoadOp,std::string_view label,bool registerWithRecorder)370 sk_sp<Device> Device::Make(Recorder* recorder,
371 const SkImageInfo& ii,
372 skgpu::Budgeted budgeted,
373 Mipmapped mipmapped,
374 SkBackingFit backingFit,
375 const SkSurfaceProps& props,
376 LoadOp initialLoadOp,
377 std::string_view label,
378 bool registerWithRecorder) {
379 SkASSERT(!(mipmapped == Mipmapped::kYes && backingFit == SkBackingFit::kApprox));
380 if (!recorder) {
381 return nullptr;
382 }
383
384 const Caps* caps = recorder->priv().caps();
385 SkISize backingDimensions = backingFit == SkBackingFit::kApprox ? GetApproxSize(ii.dimensions())
386 : ii.dimensions();
387 auto textureInfo = caps->getDefaultSampledTextureInfo(ii.colorType(),
388 mipmapped,
389 recorder->priv().isProtected(),
390 Renderable::kYes);
391
392 return Make(recorder,
393 TextureProxy::Make(caps, recorder->priv().resourceProvider(),
394 backingDimensions, textureInfo, std::move(label), budgeted),
395 ii.dimensions(),
396 ii.colorInfo(),
397 props,
398 initialLoadOp,
399 registerWithRecorder);
400 }
401
Make(Recorder * recorder,sk_sp<TextureProxy> target,SkISize deviceSize,const SkColorInfo & colorInfo,const SkSurfaceProps & props,LoadOp initialLoadOp,bool registerWithRecorder)402 sk_sp<Device> Device::Make(Recorder* recorder,
403 sk_sp<TextureProxy> target,
404 SkISize deviceSize,
405 const SkColorInfo& colorInfo,
406 const SkSurfaceProps& props,
407 LoadOp initialLoadOp,
408 bool registerWithRecorder) {
409 if (!recorder) {
410 return nullptr;
411 }
412
413 sk_sp<DrawContext> dc = DrawContext::Make(recorder->priv().caps(),
414 std::move(target),
415 deviceSize,
416 colorInfo,
417 props);
418 if (!dc) {
419 return nullptr;
420 } else if (initialLoadOp == LoadOp::kClear) {
421 dc->clear(SkColors::kTransparent);
422 } else if (initialLoadOp == LoadOp::kDiscard) {
423 dc->discard();
424 } // else kLoad is the default initial op for a DrawContext
425
426 sk_sp<Device> device{new Device(recorder, std::move(dc))};
427 if (registerWithRecorder) {
428 // We don't register the device with the recorder until after the constructor has returned.
429 recorder->registerDevice(device);
430 } else {
431 // Since it's not registered, it should go out of scope before nextRecordingID() changes
432 // from what is saved to fScopedRecordingID.
433 SkDEBUGCODE(device->fScopedRecordingID = recorder->priv().nextRecordingID();)
434 }
435 return device;
436 }
437
438 // These default tuning numbers for the HybridBoundsManager were chosen from looking at performance
439 // and accuracy curves produced by the BoundsManagerBench for random draw bounding boxes. This
440 // config will use brute force for the first 64 draw calls to the Device and then switch to a grid
441 // that is dynamically sized to produce cells that are 16x16, up to a grid that's 32x32 cells.
442 // This seemed like a sweet spot balancing accuracy for low-draw count surfaces and overhead for
443 // high-draw count and high-resolution surfaces. With the 32x32 grid limit, cell size will increase
444 // above 16px when the surface dimension goes above 512px.
445 // TODO: These could be exposed as context options or surface options, and we may want to have
446 // different strategies in place for a base device vs. a layer's device.
static constexpr int kGridCellSize = 16;   // target grid cell dimension in pixels
static constexpr int kMaxBruteForceN = 64; // draws handled brute-force before switching to a grid
static constexpr int kMaxGridSize = 32;    // grid capped at 32x32 cells (see comment above)
450
// Constructs a Device rendering into `dc`'s target, wiring up clip tracking, the draw-order
// bounds manager, stencil-set allocation, and text sub-run configuration from the Caps.
Device::Device(Recorder* recorder, sk_sp<DrawContext> dc)
        : SkDevice(dc->imageInfo(), dc->surfaceProps())
        , fRecorder(recorder)
        , fDC(std::move(dc))
        , fClip(this)
        , fColorDepthBoundsManager(std::make_unique<HybridBoundsManager>(
                  fDC->imageInfo().dimensions(), kGridCellSize, kMaxBruteForceN, kMaxGridSize))
        , fDisjointStencilSet(std::make_unique<IntersectionTreeSet>())
        , fCachedLocalToDevice(SkM44())
        , fCurrentDepth(DrawOrder::kClearDepth)
        , fSubRunControl(recorder->priv().caps()->getSubRunControl(
                  fDC->surfaceProps().isUseDeviceIndependentFonts())) {
    SkASSERT(SkToBool(fDC) && SkToBool(fRecorder));
    if (fRecorder->priv().caps()->defaultMSAASamplesCount() > 1) {
        if (fRecorder->priv().caps()->msaaRenderToSingleSampledSupport()) {
            // The backend can resolve MSAA directly into a single-sampled target.
            fMSAASupported = true;
        } else {
            // Otherwise MSAA is usable only if a compatible discardable MSAA texture can be made.
            TextureInfo msaaTexInfo =
                    fRecorder->priv().caps()->getDefaultMSAATextureInfo(fDC->target()->textureInfo(),
                                                                        Discardable::kYes);
            fMSAASupported = msaaTexInfo.isValid();
        }
    }
}
475
Device::~Device() {
    // The Device should have been marked immutable before it's destroyed, or the Recorder was the
    // last holder of a reference to it and de-registered the device as part of its cleanup.
    // However, if the Device was not registered with the recorder (i.e. a scratch device) we don't
    // require that its recorder be abandoned. Scratch devices must either have been marked
    // immutable or be destroyed before the recorder has been snapped.
    SkASSERT(!fRecorder || fScopedRecordingID != 0);
#if defined(SK_DEBUG)
    if (fScopedRecordingID != 0 && fRecorder) {
        // A scratch device must not outlive the recording it was created for.
        SkASSERT(fScopedRecordingID == fRecorder->priv().nextRecordingID());
    }
    // else it wasn't a scratch device, or it was a scratch device that was marked immutable so its
    // lifetime was validated when setImmutable() was called.
#endif
}
491
// Flushes all remaining work and permanently severs this Device from its Recorder, so no further
// draws can be recorded through it.
void Device::setImmutable() {
    if (fRecorder) {
        // Push any pending work to the Recorder now. setImmutable() is only called by the
        // destructor of a client-owned Surface, or explicitly in layer/filtering workflows. In
        // both cases this is restricted to the Recorder's thread. This is in contrast to ~Device(),
        // which might be called from another thread if it was linked to an Image used in multiple
        // recorders.
        this->flushPendingWorkToRecorder();
        fRecorder->deregisterDevice(this);
        // Abandoning the recorder ensures that there are no further operations that can be recorded
        // and is relied on by Image::notifyInUse() to detect when it can unlink from a Device.
        this->abandonRecorder();
    }
}
506
localToDeviceTransform()507 const Transform& Device::localToDeviceTransform() {
508 if (this->checkLocalToDeviceDirty()) {
509 fCachedLocalToDevice = Transform{this->localToDevice44()};
510 }
511 return fCachedLocalToDevice;
512 }
513
strikeDeviceInfo() const514 SkStrikeDeviceInfo Device::strikeDeviceInfo() const {
515 return {this->surfaceProps(), this->scalerContextFlags(), &fSubRunControl};
516 }
517
createDevice(const CreateInfo & info,const SkPaint *)518 sk_sp<SkDevice> Device::createDevice(const CreateInfo& info, const SkPaint*) {
519 // TODO: Inspect the paint and create info to determine if there's anything that has to be
520 // modified to support inline subpasses.
521 SkSurfaceProps props =
522 this->surfaceProps().cloneWithPixelGeometry(info.fPixelGeometry);
523
524 // Skia's convention is to only clear a device if it is non-opaque.
525 LoadOp initialLoadOp = info.fInfo.isOpaque() ? LoadOp::kDiscard : LoadOp::kClear;
526
527 std::string label = this->target()->label();
528 if (label.empty()) {
529 label = "ChildDevice";
530 } else {
531 label += "_ChildDevice";
532 }
533
534 return Make(fRecorder,
535 info.fInfo,
536 skgpu::Budgeted::kYes,
537 Mipmapped::kNo,
538 SkBackingFit::kApprox,
539 props,
540 initialLoadOp,
541 label);
542 }
543
// Creates a new render-target surface that shares this Device's Recorder.
sk_sp<SkSurface> Device::makeSurface(const SkImageInfo& ii, const SkSurfaceProps& props) {
    return SkSurfaces::RenderTarget(fRecorder, ii, Mipmapped::kNo, &props);
}
547
makeImageCopy(const SkIRect & subset,Budgeted budgeted,Mipmapped mipmapped,SkBackingFit backingFit)548 sk_sp<Image> Device::makeImageCopy(const SkIRect& subset,
549 Budgeted budgeted,
550 Mipmapped mipmapped,
551 SkBackingFit backingFit) {
552 ASSERT_SINGLE_OWNER
553 this->flushPendingWorkToRecorder();
554
555 const SkColorInfo& colorInfo = this->imageInfo().colorInfo();
556 TextureProxyView srcView = this->readSurfaceView();
557 if (!srcView) {
558 // readSurfaceView() returns an empty view when the target is not texturable. Create an
559 // equivalent view for the blitting operation.
560 Swizzle readSwizzle = fRecorder->priv().caps()->getReadSwizzle(
561 colorInfo.colorType(), this->target()->textureInfo());
562 srcView = {sk_ref_sp(this->target()), readSwizzle};
563 }
564 std::string label = this->target()->label();
565 if (label.empty()) {
566 label = "CopyDeviceTexture";
567 } else {
568 label += "_DeviceCopy";
569 }
570
571 return Image::Copy(fRecorder, srcView, colorInfo, subset, budgeted, mipmapped, backingFit,
572 label);
573 }
574
// Synchronous pixel readback; only functional in test builds where the Recorder's Context is
// reachable. Production readback goes through the async APIs instead.
bool Device::onReadPixels(const SkPixmap& pm, int srcX, int srcY) {
#if defined(GPU_TEST_UTILS)
    // This testing-only function should only be called before the Device has detached from its
    // Recorder, since it's accessed via the test-held Surface.
    ASSERT_SINGLE_OWNER
    if (Context* context = fRecorder->priv().context()) {
        // Add all previous commands generated to the command buffer.
        // If the client snaps later they'll only get post-read commands in their Recording,
        // but since they're doing a readPixels in the middle that shouldn't be unexpected.
        std::unique_ptr<Recording> recording = fRecorder->snap();
        if (!recording) {
            return false;
        }
        InsertRecordingInfo info;
        info.fRecording = recording.get();
        if (!context->insertRecording(info)) {
            return false;
        }
        // All prior work is submitted, so the target's pixels can be read back directly.
        return context->priv().readPixels(pm, fDC->target(), this->imageInfo(), srcX, srcY);
    }
#endif
    // We have no access to a context to do a read pixels here.
    return false;
}
599
// Writes `src` pixels into the device target at (x, y). Uses a direct upload when the backend
// supports it, otherwise falls back to drawing the pixels as an image with kSrc blending.
bool Device::onWritePixels(const SkPixmap& src, int x, int y) {
    ASSERT_SINGLE_OWNER
    // TODO: we may need to share this in a more central place to handle uploads
    // to backend textures

    const TextureProxy* target = fDC->target();

    // TODO: add mipmap support for createBackendTexture

    if (src.colorType() == kUnknown_SkColorType) {
        return false;
    }

    // If one alpha type is unknown and the other isn't, it's too underspecified.
    if ((src.alphaType() == kUnknown_SkAlphaType) !=
        (this->imageInfo().alphaType() == kUnknown_SkAlphaType)) {
        return false;
    }

    // TODO: canvas2DFastPath?

    if (!fRecorder->priv().caps()->supportsWritePixels(target->textureInfo())) {
        // Fallback path: wrap the pixels in a texture-backed image and draw it with kSrc blending,
        // which replaces dst content just like a direct write would.
        auto image = SkImages::RasterFromPixmap(src, nullptr, nullptr);
        image = SkImages::TextureFromImage(fRecorder, image.get());
        if (!image) {
            return false;
        }

        SkPaint paint;
        paint.setBlendMode(SkBlendMode::kSrc);
        this->drawImageRect(image.get(),
                            /*src=*/nullptr,
                            SkRect::MakeXYWH(x, y, src.width(), src.height()),
                            SkFilterMode::kNearest,
                            paint,
                            SkCanvas::kFast_SrcRectConstraint);
        return true;
    }

    // TODO: check for flips and either handle here or pass info to UploadTask

    // Determine rect to copy
    SkIRect dstRect = SkIRect::MakePtSize({x, y}, src.dimensions());
    if (!target->isFullyLazy() && !dstRect.intersect(SkIRect::MakeSize(target->dimensions()))) {
        return false;
    }

    // Set up copy location
    const void* addr = src.addr(dstRect.fLeft - x, dstRect.fTop - y);
    std::vector<MipLevel> levels;
    levels.push_back({addr, src.rowBytes()});

    // The writePixels() still respects painter's order, so flush everything to tasks before
    // recording the upload for the pixel data.
    this->internalFlush();
    // The new upload will be executed before any new draws are recorded and also ensures that
    // the next call to flushDeviceToRecorder() will produce a non-null DrawTask. If this Device's
    // target is mipmapped, mipmap generation tasks will be added automatically at that point.
    return fDC->recordUpload(fRecorder, fDC->refTarget(), src.info().colorInfo(),
                             this->imageInfo().colorInfo(), levels, dstRect, nullptr);
}
661
662
663 ///////////////////////////////////////////////////////////////////////////////
664
isClipAntiAliased() const665 bool Device::isClipAntiAliased() const {
666 // All clips are AA'ed unless it's wide-open, empty, or a device-rect with integer coordinates
667 ClipStack::ClipState type = fClip.clipState();
668 if (type == ClipStack::ClipState::kWideOpen || type == ClipStack::ClipState::kEmpty) {
669 return false;
670 } else if (type == ClipStack::ClipState::kDeviceRect) {
671 const ClipStack::Element rect = *fClip.begin();
672 SkASSERT(rect.fShape.isRect() && rect.fLocalToDevice.type() == Transform::Type::kIdentity);
673 return rect.fShape.rect() != rect.fShape.rect().makeRoundOut();
674 } else {
675 return true;
676 }
677 }
678
// Conservative integer device-space bounds of the current clip.
SkIRect Device::devClipBounds() const {
    return rect_to_pixelbounds(fClip.conservativeBounds());
}
682
683 // TODO: This is easy enough to support, but do we still need this API in Skia at all?
android_utils_clipAsRgn(SkRegion * region) const684 void Device::android_utils_clipAsRgn(SkRegion* region) const {
685 SkIRect bounds = this->devClipBounds();
686 // Assume wide open and then perform intersect/difference operations reducing the region
687 region->setRect(bounds);
688 const SkRegion deviceBounds(bounds);
689 for (const ClipStack::Element& e : fClip) {
690 SkRegion tmp;
691 if (e.fShape.isRect() && e.fLocalToDevice.type() == Transform::Type::kIdentity) {
692 tmp.setRect(rect_to_pixelbounds(e.fShape.rect()));
693 } else {
694 SkPath tmpPath = e.fShape.asPath();
695 tmpPath.transform(e.fLocalToDevice);
696 tmp.setPath(tmpPath, deviceBounds);
697 }
698
699 region->op(tmp, (SkRegion::Op) e.fOp);
700 }
701 }
702
clipRect(const SkRect & rect,SkClipOp op,bool aa)703 void Device::clipRect(const SkRect& rect, SkClipOp op, bool aa) {
704 SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
705 auto snapping = aa ? ClipStack::PixelSnapping::kNo : ClipStack::PixelSnapping::kYes;
706 fClip.clipShape(this->localToDeviceTransform(), Shape{rect}, op, snapping);
707 }
708
clipRRect(const SkRRect & rrect,SkClipOp op,bool aa)709 void Device::clipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
710 SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
711 auto snapping = aa ? ClipStack::PixelSnapping::kNo : ClipStack::PixelSnapping::kYes;
712 fClip.clipShape(this->localToDeviceTransform(), Shape{rrect}, op, snapping);
713 }
714
// Adds a path clip element; path clips are never pixel-snapped (the `aa` flag is unused here).
void Device::clipPath(const SkPath& path, SkClipOp op, bool aa) {
    SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
    // TODO: Ensure all path inspection is handled here or in SkCanvas, and that non-AA rects as
    // paths are routed appropriately.
    // TODO: Must also detect paths that are lines so the clip stack can be set to empty
    fClip.clipShape(this->localToDeviceTransform(), Shape{path}, op);
}
722
// Forwards a shader-based clip to the ClipStack.
void Device::onClipShader(sk_sp<SkShader> shader) {
    fClip.clipShader(std::move(shader));
}
726
727 // TODO: Is clipRegion() on the deprecation chopping block. If not it should be...
clipRegion(const SkRegion & globalRgn,SkClipOp op)728 void Device::clipRegion(const SkRegion& globalRgn, SkClipOp op) {
729 SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
730
731 Transform globalToDevice{this->globalToDevice()};
732
733 if (globalRgn.isEmpty()) {
734 fClip.clipShape(globalToDevice, Shape{}, op);
735 } else if (globalRgn.isRect()) {
736 fClip.clipShape(globalToDevice, Shape{SkRect::Make(globalRgn.getBounds())}, op,
737 ClipStack::PixelSnapping::kYes);
738 } else {
739 // TODO: Can we just iterate the region and do non-AA rects for each chunk?
740 SkPath path;
741 globalRgn.getBoundaryPath(&path);
742 fClip.clipShape(globalToDevice, Shape{path}, op);
743 }
744 }
745
replaceClip(const SkIRect & rect)746 void Device::replaceClip(const SkIRect& rect) {
747 // ReplaceClip() is currently not intended to be supported in Graphite since it's only used
748 // for emulating legacy clip ops in Android Framework, and apps/devices that require that
749 // should not use Graphite. However, if it needs to be supported, we could probably implement
750 // it by:
751 // 1. Flush all pending clip element depth draws.
752 // 2. Draw a fullscreen rect to the depth attachment using a Z value greater than what's
753 // been used so far.
754 // 3. Make sure all future "unclipped" draws use this Z value instead of 0 so they aren't
755 // sorted before the depth reset.
756 // 4. Make sure all prior elements are inactive so they can't affect subsequent draws.
757 //
758 // For now, just ignore it.
759 }
760
761 ///////////////////////////////////////////////////////////////////////////////
762
drawPaint(const SkPaint & paint)763 void Device::drawPaint(const SkPaint& paint) {
764 ASSERT_SINGLE_OWNER
765 // We never want to do a fullscreen clear on a fully-lazy render target, because the device size
766 // may be smaller than the final surface we draw to, in which case we don't want to fill the
767 // entire final surface.
768 if (this->isClipWideOpen() && !fDC->target()->isFullyLazy()) {
769 if (!paint_depends_on_dst(paint)) {
770 if (std::optional<SkColor4f> color = extract_paint_color(paint, fDC->colorInfo())) {
771 // do fullscreen clear
772 fDC->clear(*color);
773 return;
774 } else {
775 // This paint does not depend on the destination and covers the entire surface, so
776 // discard everything previously recorded and proceed with the draw.
777 fDC->discard();
778 }
779 }
780 }
781
782 Shape inverseFill; // defaults to empty
783 inverseFill.setInverted(true);
784 // An empty shape with an inverse fill completely floods the clip
785 SkASSERT(inverseFill.isEmpty() && inverseFill.inverted());
786
787 this->drawGeometry(this->localToDeviceTransform(),
788 Geometry(inverseFill),
789 paint,
790 DefaultFillStyle(),
791 DrawFlags::kIgnorePathEffect);
792 }
793
drawRect(const SkRect & r,const SkPaint & paint)794 void Device::drawRect(const SkRect& r, const SkPaint& paint) {
795 Rect rectToDraw(r);
796 SkStrokeRec style(paint);
797 if (!paint.isAntiAlias()) {
798 // Graphite assumes everything is anti-aliased. In the case of axis-aligned non-aa requested
799 // rectangles, we snap the local geometry to land on pixel boundaries to emulate non-aa.
800 if (style.isFillStyle()) {
801 rectToDraw = snap_rect_to_pixels(this->localToDeviceTransform(), rectToDraw);
802 } else {
803 const bool strokeAndFill = style.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
804 float strokeWidth = style.getWidth();
805 rectToDraw = snap_rect_to_pixels(this->localToDeviceTransform(),
806 rectToDraw, &strokeWidth);
807 style.setStrokeStyle(strokeWidth, strokeAndFill);
808 }
809 }
810 this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(rectToDraw)), paint, style);
811 }
812
drawVertices(const SkVertices * vertices,sk_sp<SkBlender> blender,const SkPaint & paint,bool skipColorXform)813 void Device::drawVertices(const SkVertices* vertices, sk_sp<SkBlender> blender,
814 const SkPaint& paint, bool skipColorXform) {
815 // TODO - Add GPU handling of skipColorXform once Graphite has its color system more fleshed out.
816 this->drawGeometry(this->localToDeviceTransform(),
817 Geometry(sk_ref_sp(vertices)),
818 paint,
819 DefaultFillStyle(),
820 DrawFlags::kIgnorePathEffect,
821 std::move(blender),
822 skipColorXform);
823 }
824
// Attempts to draw 'image' as a set of tiles (for images too large for one texture).
// Returns false when there is no recorder to tile through, in which case the caller must
// draw the image another way; otherwise returns whether tiling actually occurred.
bool Device::drawAsTiledImageRect(SkCanvas* canvas,
                                  const SkImage* image,
                                  const SkRect* src,
                                  const SkRect& dst,
                                  const SkSamplingOptions& sampling,
                                  const SkPaint& paint,
                                  SkCanvas::SrcRectConstraint constraint) {
    auto recorder = canvas->recorder();
    if (!recorder) {
        return false;
    }
    SkASSERT(src);

    // For Graphite this is a pretty loose heuristic. The Recorder-local cache size (relative
    // to the large image's size) is used as a proxy for how conservative we should be when
    // allocating tiles. Since the tiles will actually be owned by the client (via an
    // ImageProvider) they won't actually add any memory pressure directly to Graphite.
    size_t cacheSize = recorder->priv().getResourceCacheLimit();
    size_t maxTextureSize = recorder->priv().caps()->maxTextureSize();

#if defined(GPU_TEST_UTILS)
    // Tests may force an artificially small max texture size to exercise the tiling path,
    // and observe how many tiles were produced via the atomic counter.
    if (gOverrideMaxTextureSizeGraphite) {
        maxTextureSize = gOverrideMaxTextureSizeGraphite;
    }
    gNumTilesDrawnGraphite.store(0, std::memory_order_relaxed);
#endif

    // DrawAsTiledImageRect produces per-edge AA quads, which do not participate in non-AA pixel
    // snapping emulation. To match an un-tiled drawImageRect, round the src and dst geometry
    // before any tiling occurs.
    SkRect finalSrc = *src;
    SkRect finalDst = dst;
    if (!paint.isAntiAlias()) {
        snap_src_and_dst_rect_to_pixels(this->localToDeviceTransform(),
                                        &finalSrc, &finalDst);
    }

    [[maybe_unused]] auto [wasTiled, numTiles] =
            skgpu::TiledTextureUtils::DrawAsTiledImageRect(canvas,
                                                           image,
                                                           finalSrc,
                                                           finalDst,
                                                           SkCanvas::kAll_QuadAAFlags,
                                                           sampling,
                                                           &paint,
                                                           constraint,
                                                           /* sharpenMM= */ true,
                                                           cacheSize,
                                                           maxTextureSize);
#if defined(GPU_TEST_UTILS)
    gNumTilesDrawnGraphite.store(numTiles, std::memory_order_relaxed);
#endif
    return wasTiled;
}
879
drawOval(const SkRect & oval,const SkPaint & paint)880 void Device::drawOval(const SkRect& oval, const SkPaint& paint) {
881 if (paint.getPathEffect()) {
882 // Dashing requires that the oval path starts on the right side and travels clockwise. This
883 // is the default for the SkPath::Oval constructor, as used by SkBitmapDevice.
884 this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(SkPath::Oval(oval))),
885 paint, SkStrokeRec(paint));
886 } else {
887 // TODO: This has wasted effort from the SkCanvas level since it instead converts rrects
888 // that happen to be ovals into this, only for us to go right back to rrect.
889 this->drawRRect(SkRRect::MakeOval(oval), paint);
890 }
891 }
892
drawArc(const SkArc & arc,const SkPaint & paint)893 void Device::drawArc(const SkArc& arc, const SkPaint& paint) {
894 // For sweeps >= 360°, simple fills and simple strokes without the center point or square caps
895 // are ovals. Culling these here simplifies the path processing in Shape.
896 if (!paint.getPathEffect() &&
897 SkScalarAbs(arc.sweepAngle()) >= 360.f &&
898 (paint.getStyle() == SkPaint::kFill_Style ||
899 (paint.getStyle() == SkPaint::kStroke_Style &&
900 // square caps can stick out from the shape so we can't do this with an rrect draw
901 paint.getStrokeCap() != SkPaint::kSquare_Cap &&
902 // non-wedge cases with strokes will draw lines to the center
903 !arc.isWedge()))) {
904 this->drawRRect(SkRRect::MakeOval(arc.oval()), paint);
905 } else {
906 this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(arc)),
907 paint, SkStrokeRec(paint));
908 }
909 }
910
drawRRect(const SkRRect & rr,const SkPaint & paint)911 void Device::drawRRect(const SkRRect& rr, const SkPaint& paint) {
912 Shape rrectToDraw;
913 SkStrokeRec style(paint);
914
915 if (paint.isAntiAlias()) {
916 rrectToDraw.setRRect(rr);
917 } else {
918 // Snap the horizontal and vertical edges of the rounded rectangle to pixel edges to match
919 // the behavior of drawRect(rr.bounds()), to partially emulate non-AA rendering while
920 // preserving the anti-aliasing of the curved corners.
921 Rect snappedBounds;
922 if (style.isFillStyle()) {
923 snappedBounds = snap_rect_to_pixels(this->localToDeviceTransform(), rr.rect());
924 } else {
925 const bool strokeAndFill = style.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
926 float strokeWidth = style.getWidth();
927 snappedBounds = snap_rect_to_pixels(this->localToDeviceTransform(),
928 rr.rect(), &strokeWidth);
929 style.setStrokeStyle(strokeWidth, strokeAndFill);
930 }
931
932 SkRRect snappedRRect;
933 snappedRRect.setRectRadii(snappedBounds.asSkRect(), rr.radii().data());
934 rrectToDraw.setRRect(snappedRRect);
935 }
936
937 this->drawGeometry(this->localToDeviceTransform(), Geometry(rrectToDraw), paint, style);
938 }
939
drawPath(const SkPath & path,const SkPaint & paint,bool pathIsMutable)940 void Device::drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) {
941 // Alternatively, we could move this analysis to SkCanvas. Also, we could consider applying the
942 // path effect, being careful about starting point and direction.
943 if (!paint.getPathEffect() && !path.isInverseFillType()) {
944 if (SkRect oval; path.isOval(&oval)) {
945 this->drawOval(oval, paint);
946 return;
947 }
948 if (SkRRect rrect; path.isRRect(&rrect)) {
949 this->drawRRect(rrect, paint);
950 return;
951 }
952 // For rects, if the path is not explicitly closed and the paint style is stroked then it
953 // represents a rectangle with only 3 sides rasterized (and with any caps). If it's filled
954 // or is closed+stroked, then the path renders identically to the rectangle.
955 bool isClosed = false;
956 if (SkRect rect; path.isRect(&rect, &isClosed) &&
957 (paint.getStyle() == SkPaint::kFill_Style || isClosed)) {
958 this->drawRect(rect, paint);
959 return;
960 }
961 }
962 this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(path)),
963 paint, SkStrokeRec(paint));
964 }
965
drawPoints(SkCanvas::PointMode mode,size_t count,const SkPoint * points,const SkPaint & paint)966 void Device::drawPoints(SkCanvas::PointMode mode, size_t count,
967 const SkPoint* points, const SkPaint& paint) {
968 SkStrokeRec stroke(paint, SkPaint::kStroke_Style);
969 size_t next = 0;
970 if (mode == SkCanvas::kPoints_PointMode) {
971 // Treat kPoints mode as stroking zero-length path segments, which produce caps so that
972 // both hairlines and round vs. square geometry are handled entirely on the GPU.
973 // TODO: SkCanvas should probably do the butt to square cap correction.
974 if (paint.getStrokeCap() == SkPaint::kButt_Cap) {
975 stroke.setStrokeParams(SkPaint::kSquare_Cap,
976 paint.getStrokeJoin(),
977 paint.getStrokeMiter());
978 }
979 } else {
980 next = 1;
981 count--;
982 }
983
984 size_t inc = mode == SkCanvas::kLines_PointMode ? 2 : 1;
985 for (size_t i = 0; i < count; i += inc) {
986 this->drawGeometry(this->localToDeviceTransform(),
987 Geometry(Shape(points[i], points[i + next])),
988 paint, stroke);
989 }
990 }
991
drawEdgeAAQuad(const SkRect & rect,const SkPoint clip[4],SkCanvas::QuadAAFlags aaFlags,const SkColor4f & color,SkBlendMode mode)992 void Device::drawEdgeAAQuad(const SkRect& rect,
993 const SkPoint clip[4],
994 SkCanvas::QuadAAFlags aaFlags,
995 const SkColor4f& color,
996 SkBlendMode mode) {
997 SkPaint solidColorPaint;
998 solidColorPaint.setColor4f(color, /*colorSpace=*/nullptr);
999 solidColorPaint.setBlendMode(mode);
1000
1001 // NOTE: We do not snap edge AA quads that are fully non-AA because we need their edges to seam
1002 // with quads that have mixed edge flags (so both need to match the GPU rasterization, not our
1003 // CPU rounding).
1004 auto flags = SkEnumBitMask<EdgeAAQuad::Flags>(static_cast<EdgeAAQuad::Flags>(aaFlags));
1005 EdgeAAQuad quad = clip ? EdgeAAQuad(clip, flags) : EdgeAAQuad(rect, flags);
1006 this->drawGeometry(this->localToDeviceTransform(),
1007 Geometry(quad),
1008 solidColorPaint,
1009 DefaultFillStyle(),
1010 DrawFlags::kIgnorePathEffect);
1011 }
1012
// Draws a batch of images as edge-AA quads, each entry with its own alpha, AA edge flags,
// optional 4-point dst clip, and optional extra pre-view matrix. This is the bulk entry
// point used by compositor clients (e.g. SkiaRenderer); drawImageRect() funnels here too.
void Device::drawEdgeAAImageSet(const SkCanvas::ImageSetEntry set[], int count,
                                const SkPoint dstClips[], const SkMatrix preViewMatrices[],
                                const SkSamplingOptions& sampling, const SkPaint& paint,
                                SkCanvas::SrcRectConstraint constraint) {
    SkASSERT(count > 0);

    // Reused across entries; only its shader/alpha are rewritten per entry.
    SkPaint paintWithShader(paint);
    int dstClipIndex = 0;
    for (int i = 0; i < count; ++i) {
        // If the entry is clipped by 'dstClips', that must be provided
        SkASSERT(!set[i].fHasClip || dstClips);
        // Similarly, if it has an extra transform, those must be provided
        SkASSERT(set[i].fMatrixIndex < 0 || preViewMatrices);

        // Resolve the entry's image to a Graphite-backed image (and possibly adjusted
        // sampling); on failure the remaining entries are abandoned as well.
        auto [ imageToDraw, newSampling ] =
                skgpu::graphite::GetGraphiteBacked(this->recorder(), set[i].fImage.get(), sampling);
        if (!imageToDraw) {
            SKGPU_LOG_W("Device::drawImageRect: Creation of Graphite-backed image failed");
            return;
        }

        // TODO: Produce an image shading paint key and data directly without having to reconstruct
        // the equivalent SkPaint for each entry. Reuse the key and data between entries if possible
        paintWithShader.setShader(paint.refShader());
        paintWithShader.setAlphaf(paint.getAlphaf() * set[i].fAlpha);
        SkRect dst = SkModifyPaintAndDstForDrawImageRect(
                imageToDraw.get(), newSampling, set[i].fSrcRect, set[i].fDstRect,
                constraint == SkCanvas::kStrict_SrcRectConstraint,
                &paintWithShader);
        if (dst.isEmpty()) {
            return;
        }

        // NOTE: See drawEdgeAAQuad for details, we do not snap non-AA quads.
        auto flags =
                SkEnumBitMask<EdgeAAQuad::Flags>(static_cast<EdgeAAQuad::Flags>(set[i].fAAFlags));
        EdgeAAQuad quad = set[i].fHasClip ? EdgeAAQuad(dstClips + dstClipIndex, flags)
                                          : EdgeAAQuad(dst, flags);

        // TODO: Calling drawGeometry() for each entry re-evaluates the clip stack every time, which
        // is consistent with Ganesh's behavior. It also matches the behavior if edge-AA images were
        // submitted one at a time by SkiaRenderer (a nice client simplification). However, we
        // should explore the performance trade off with doing one bulk evaluation for the whole set
        if (set[i].fMatrixIndex < 0) {
            this->drawGeometry(this->localToDeviceTransform(),
                               Geometry(quad),
                               paintWithShader,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect);
        } else {
            // Fold the entry's extra matrix into the local-to-device transform for this draw.
            SkM44 xtraTransform(preViewMatrices[set[i].fMatrixIndex]);
            this->drawGeometry(this->localToDeviceTransform().concat(xtraTransform),
                               Geometry(quad),
                               paintWithShader,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect);
        }

        // Each clipped entry consumes 4 points from 'dstClips'.
        dstClipIndex += 4 * set[i].fHasClip;
    }
}
1074
drawImageRect(const SkImage * image,const SkRect * src,const SkRect & dst,const SkSamplingOptions & sampling,const SkPaint & paint,SkCanvas::SrcRectConstraint constraint)1075 void Device::drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
1076 const SkSamplingOptions& sampling, const SkPaint& paint,
1077 SkCanvas::SrcRectConstraint constraint) {
1078 SkCanvas::ImageSetEntry single{sk_ref_sp(image),
1079 src ? *src : SkRect::Make(image->bounds()),
1080 dst,
1081 /*alpha=*/1.f,
1082 SkCanvas::kAll_QuadAAFlags};
1083 // While this delegates to drawEdgeAAImageSet() for the image shading logic, semantically a
1084 // drawImageRect()'s non-AA behavior should match that of drawRect() so we snap dst (and update
1085 // src to match) if needed before hand.
1086 if (!paint.isAntiAlias()) {
1087 snap_src_and_dst_rect_to_pixels(this->localToDeviceTransform(),
1088 &single.fSrcRect, &single.fDstRect);
1089 }
1090 this->drawEdgeAAImageSet(&single, 1, nullptr, nullptr, sampling, paint, constraint);
1091 }
1092
atlasDelegate()1093 sktext::gpu::AtlasDrawDelegate Device::atlasDelegate() {
1094 return [&](const sktext::gpu::AtlasSubRun* subRun,
1095 SkPoint drawOrigin,
1096 const SkPaint& paint,
1097 sk_sp<SkRefCnt> subRunStorage,
1098 sktext::gpu::RendererData rendererData) {
1099 this->drawAtlasSubRun(subRun, drawOrigin, paint, std::move(subRunStorage), rendererData);
1100 };
1101 }
1102
onDrawGlyphRunList(SkCanvas * canvas,const sktext::GlyphRunList & glyphRunList,const SkPaint & paint)1103 void Device::onDrawGlyphRunList(SkCanvas* canvas,
1104 const sktext::GlyphRunList& glyphRunList,
1105 const SkPaint& paint) {
1106 ASSERT_SINGLE_OWNER
1107 fRecorder->priv().textBlobCache()->drawGlyphRunList(canvas,
1108 this->localToDevice(),
1109 glyphRunList,
1110 paint,
1111 this->strikeDeviceInfo(),
1112 this->atlasDelegate());
1113 }
1114
// Renders one atlas-based text sub-run, uploading glyph masks into the Recorder's atlas in
// chunks. If the atlas runs out of space partway through, all tracked Devices are flushed
// (clearing the atlas) and the loop resumes with the remaining glyphs.
void Device::drawAtlasSubRun(const sktext::gpu::AtlasSubRun* subRun,
                             SkPoint drawOrigin,
                             const SkPaint& paint,
                             sk_sp<SkRefCnt> subRunStorage,
                             sktext::gpu::RendererData rendererData) {
    ASSERT_SINGLE_OWNER

    const int subRunEnd = subRun->glyphCount();
    // Delegate used by the sub-run to regenerate a glyph range into the Graphite atlas.
    auto regenerateDelegate = [&](sktext::gpu::GlyphVector* glyphs,
                                  int begin,
                                  int end,
                                  skgpu::MaskFormat maskFormat,
                                  int padding) {
        return glyphs->regenerateAtlasForGraphite(begin, end, maskFormat, padding, fRecorder);
    };
    for (int subRunCursor = 0; subRunCursor < subRunEnd;) {
        // For the remainder of the run, add any atlas uploads to the Recorder's TextAtlasManager
        auto[ok, glyphsRegenerated] = subRun->regenerateAtlas(subRunCursor, subRunEnd,
                                                              regenerateDelegate);
        // There was a problem allocating the glyph in the atlas. Bail.
        if (!ok) {
            return;
        }
        if (glyphsRegenerated) {
            auto [bounds, localToDevice] = subRun->vertexFiller().boundsAndDeviceMatrix(
                    this->localToDeviceTransform(), drawOrigin);
            SkPaint subRunPaint = paint;
            // For color emoji, shaders don't affect the final color
            if (subRun->maskFormat() == skgpu::MaskFormat::kARGB) {
                subRunPaint.setShader(nullptr);
            }

            // Only use the gamma-correct distance table when drawing to a linear-gamma target.
            bool useGammaCorrectDistanceTable =
                    this->imageInfo().colorSpace() &&
                    this->imageInfo().colorSpace()->gammaIsLinear();
            // Record the draw for the glyphs regenerated in this chunk only.
            this->drawGeometry(localToDevice,
                               Geometry(SubRunData(subRun,
                                                   subRunStorage,
                                                   bounds,
                                                   this->localToDeviceTransform().inverse(),
                                                   subRunCursor,
                                                   glyphsRegenerated,
                                                   SkPaintPriv::ComputeLuminanceColor(subRunPaint),
                                                   useGammaCorrectDistanceTable,
                                                   this->surfaceProps().pixelGeometry(),
                                                   fRecorder,
                                                   rendererData)),
                               subRunPaint,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect,
                               SkBlender::Mode(SkBlendMode::kDstIn));
        }
        subRunCursor += glyphsRegenerated;

        if (subRunCursor < subRunEnd) {
            // Flush if not all the glyphs are handled because the atlas is out of space.
            // We flush every Device because the glyphs that are being flushed/referenced are not
            // necessarily specific to this Device. This addresses both multiple SkSurfaces within
            // a Recorder, and nested layers.
            TRACE_EVENT_INSTANT0("skia.gpu", "Glyph atlas full", TRACE_EVENT_SCOPE_NAME_THREAD);
            fRecorder->priv().flushTrackedDevices();
        }
    }
}
1179
drawGeometry(const Transform & localToDevice,const Geometry & geometry,const SkPaint & paint,const SkStrokeRec & style,SkEnumBitMask<DrawFlags> flags,sk_sp<SkBlender> primitiveBlender,bool skipColorXform)1180 void Device::drawGeometry(const Transform& localToDevice,
1181 const Geometry& geometry,
1182 const SkPaint& paint,
1183 const SkStrokeRec& style,
1184 SkEnumBitMask<DrawFlags> flags,
1185 sk_sp<SkBlender> primitiveBlender,
1186 bool skipColorXform) {
1187 ASSERT_SINGLE_OWNER
1188
1189 if (!localToDevice.valid()) {
1190 // If the transform is not invertible or not finite then drawing isn't well defined.
1191 SKGPU_LOG_W("Skipping draw with non-invertible/non-finite transform.");
1192 return;
1193 }
1194
1195 // Heavy weight paint options like path effects, mask filters, and stroke-and-fill style are
1196 // applied on the CPU by generating a new shape and recursing on drawGeometry with updated flags
1197 if (!(flags & DrawFlags::kIgnorePathEffect) && paint.getPathEffect()) {
1198 // Apply the path effect before anything else, which if we are applying here, means that we
1199 // are dealing with a Shape. drawVertices (and a SkVertices geometry) should pass in
1200 // kIgnorePathEffect per SkCanvas spec. Text geometry also should pass in kIgnorePathEffect
1201 // because the path effect is applied per glyph by the SkStrikeSpec already.
1202 SkASSERT(geometry.isShape());
1203
1204 // TODO: If asADash() returns true and the base path matches the dashing fast path, then
1205 // that should be detected now as well. Maybe add dashPath to Device so canvas can handle it
1206 SkStrokeRec newStyle = style;
1207 float maxScaleFactor = localToDevice.maxScaleFactor();
1208 if (localToDevice.type() == Transform::Type::kPerspective) {
1209 auto bounds = geometry.bounds();
1210 float tl = std::get<1>(localToDevice.scaleFactors({bounds.left(), bounds.top()}));
1211 float tr = std::get<1>(localToDevice.scaleFactors({bounds.right(), bounds.top()}));
1212 float br = std::get<1>(localToDevice.scaleFactors({bounds.right(), bounds.bot()}));
1213 float bl = std::get<1>(localToDevice.scaleFactors({bounds.left(), bounds.bot()}));
1214 maxScaleFactor = std::max(std::max(tl, tr), std::max(bl, br));
1215 }
1216 newStyle.setResScale(maxScaleFactor);
1217 SkPath dst;
1218 if (paint.getPathEffect()->filterPath(&dst, geometry.shape().asPath(), &newStyle,
1219 nullptr, localToDevice)) {
1220 dst.setIsVolatile(true);
1221 // Recurse using the path and new style, while disabling downstream path effect handling
1222 this->drawGeometry(localToDevice, Geometry(Shape(dst)), paint, newStyle,
1223 flags | DrawFlags::kIgnorePathEffect, std::move(primitiveBlender),
1224 skipColorXform);
1225 return;
1226 } else {
1227 SKGPU_LOG_W("Path effect failed to apply, drawing original path.");
1228 this->drawGeometry(localToDevice, geometry, paint, style,
1229 flags | DrawFlags::kIgnorePathEffect, std::move(primitiveBlender),
1230 skipColorXform);
1231 return;
1232 }
1233 }
1234
1235 // TODO: The tessellating and atlas path renderers haven't implemented perspective yet, so
1236 // transform to device space so we draw something approximately correct (barring local coord
1237 // issues).
1238 if (geometry.isShape() && localToDevice.type() == Transform::Type::kPerspective &&
1239 !is_simple_shape(geometry.shape(), style.getStyle())) {
1240 SkPath devicePath = geometry.shape().asPath();
1241 devicePath.transform(localToDevice.matrix().asM33());
1242 devicePath.setIsVolatile(true);
1243 this->drawGeometry(Transform::Identity(), Geometry(Shape(devicePath)), paint, style, flags,
1244 std::move(primitiveBlender), skipColorXform);
1245 return;
1246 }
1247
1248 // TODO: Manually snap pixels for rects, rrects, and lines if paint is non-AA (ideally also
1249 // consider snapping stroke width and/or adjusting geometry for hairlines). This pixel snapping
1250 // math should be consistent with how non-AA clip [r]rects are handled.
1251
1252 // If we got here, then path effects should have been handled and the style should be fill or
1253 // stroke/hairline. Stroke-and-fill is not handled by DrawContext, but is emulated here by
1254 // drawing twice--one stroke and one fill--using the same depth value.
1255 SkASSERT(!SkToBool(paint.getPathEffect()) || (flags & DrawFlags::kIgnorePathEffect));
1256
1257 // TODO: Some renderer decisions could depend on the clip (see PathAtlas::addShape for
1258 // one workaround) so we should figure out how to remove this circular dependency.
1259
1260 // We assume that we will receive a renderer, or a PathAtlas. If it's a PathAtlas,
1261 // then we assume that the renderer chosen in PathAtlas::addShape() will have
1262 // single-channel coverage, require AA bounds outsetting, and have a single renderStep.
1263 auto [renderer, pathAtlas] =
1264 this->chooseRenderer(localToDevice, geometry, style, /*requireMSAA=*/false);
1265 if (!renderer && !pathAtlas) {
1266 SKGPU_LOG_W("Skipping draw with no supported renderer or PathAtlas.");
1267 return;
1268 }
1269
1270 // Calculate the clipped bounds of the draw and determine the clip elements that affect the
1271 // draw without updating the clip stack.
1272 const bool outsetBoundsForAA = renderer ? renderer->outsetBoundsForAA() : true;
1273 ClipStack::ElementList clipElements;
1274 const Clip clip =
1275 fClip.visitClipStackForDraw(localToDevice, geometry, style, outsetBoundsForAA,
1276 &clipElements);
1277 if (clip.isClippedOut()) {
1278 // Clipped out, so don't record anything.
1279 return;
1280 }
1281
1282 // Figure out what dst color requirements we have, if any.
1283 DstReadRequirement dstReadReq = DstReadRequirement::kNone;
1284 const SkBlenderBase* blender = as_BB(paint.getBlender());
1285 const std::optional<SkBlendMode> blendMode = blender ? blender->asBlendMode()
1286 : SkBlendMode::kSrcOver;
1287 Coverage rendererCoverage = renderer ? renderer->coverage()
1288 : Coverage::kSingleChannel;
1289 if ((clip.shader() || !clip.analyticClip().isEmpty()) && rendererCoverage == Coverage::kNone) {
1290 // Must upgrade to single channel coverage if there is a clip shader or analytic clip;
1291 // but preserve LCD coverage if the Renderer uses that.
1292 rendererCoverage = Coverage::kSingleChannel;
1293 }
1294 dstReadReq = GetDstReadRequirement(fRecorder->priv().caps(), blendMode, rendererCoverage);
1295
1296 // A primitive blender should be ignored if there is no primitive color to blend against.
1297 // Additionally, if a renderer emits a primitive color, then a null primitive blender should
1298 // be interpreted as SrcOver blending mode.
1299 if (!renderer || !renderer->emitsPrimitiveColor()) {
1300 primitiveBlender = nullptr;
1301 } else if (!SkToBool(primitiveBlender)) {
1302 primitiveBlender = SkBlender::Mode(SkBlendMode::kSrcOver);
1303 }
1304
1305 PaintParams shading{paint,
1306 std::move(primitiveBlender),
1307 clip.analyticClip(),
1308 sk_ref_sp(clip.shader()),
1309 dstReadReq,
1310 skipColorXform};
1311 const bool dependsOnDst = paint_depends_on_dst(shading) ||
1312 clip.shader() || !clip.analyticClip().isEmpty();
1313
1314 // Some shapes and styles combine multiple draws so the total render step count is split between
1315 // the main renderer and possibly a secondaryRenderer.
1316 SkStrokeRec::Style styleType = style.getStyle();
1317 const Renderer* secondaryRenderer = nullptr;
1318 Rect innerFillBounds = Rect::InfiniteInverted();
1319 if (renderer) {
1320 if (styleType == SkStrokeRec::kStrokeAndFill_Style) {
1321 // `renderer` covers the fill, `secondaryRenderer` covers the stroke
1322 secondaryRenderer = fRecorder->priv().rendererProvider()->tessellatedStrokes();
1323 } else if (style.isFillStyle() && renderer->useNonAAInnerFill() && !dependsOnDst) {
1324 // `renderer` opts into drawing a non-AA inner fill
1325 innerFillBounds = get_inner_bounds(geometry, localToDevice);
1326 if (!innerFillBounds.isEmptyNegativeOrNaN()) {
1327 secondaryRenderer = fRecorder->priv().rendererProvider()->nonAABounds();
1328 }
1329 }
1330 }
1331 const int numNewRenderSteps = (renderer ? renderer->numRenderSteps() : 1) +
1332 (secondaryRenderer ? secondaryRenderer->numRenderSteps() : 0);
1333
1334 // Decide if we have any reason to flush pending work. We want to flush before updating the clip
1335 // state or making any permanent changes to a path atlas, since otherwise clip operations and/or
1336 // atlas entries for the current draw will be flushed.
1337 const bool needsFlush = this->needsFlushBeforeDraw(numNewRenderSteps, dstReadReq);
1338 if (needsFlush) {
1339 if (pathAtlas != nullptr) {
1340 // We need to flush work for all devices associated with the current Recorder.
1341 // Otherwise we may end up with outstanding draws that depend on past atlas state.
1342 fRecorder->priv().flushTrackedDevices();
1343 } else {
1344 this->flushPendingWorkToRecorder();
1345 }
1346 }
1347
1348 // If an atlas path renderer was chosen we need to insert the shape into the atlas and schedule
1349 // it to be drawn.
1350 std::optional<PathAtlas::MaskAndOrigin> atlasMask; // only used if `pathAtlas != nullptr`
1351 if (pathAtlas != nullptr) {
1352 std::tie(renderer, atlasMask) = pathAtlas->addShape(clip.transformedShapeBounds(),
1353 geometry.shape(),
1354 localToDevice,
1355 style);
1356
1357 // If there was no space in the atlas and we haven't flushed already, then flush pending
1358 // work to clear up space in the atlas. If we had already flushed once (which would have
1359 // cleared the atlas) then the atlas is too small for this shape.
1360 if (!atlasMask && !needsFlush) {
1361 // We need to flush work for all devices associated with the current Recorder.
1362 // Otherwise we may end up with outstanding draws that depend on past atlas state.
1363 fRecorder->priv().flushTrackedDevices();
1364
1365 // Try inserting the shape again.
1366 std::tie(renderer, atlasMask) = pathAtlas->addShape(clip.transformedShapeBounds(),
1367 geometry.shape(),
1368 localToDevice,
1369 style);
1370 }
1371
1372 if (!atlasMask) {
1373 SKGPU_LOG_E("Failed to add shape to atlas!");
1374 // TODO(b/285195175): This can happen if the atlas is not large enough or a compatible
1375 // atlas texture cannot be created. Handle the first case in `chooseRenderer` and make
1376 // sure that the atlas path renderer is not chosen if the path is larger than the atlas
1377 // texture.
1378 return;
1379 }
1380 // Since addShape() was successful we should have a valid Renderer now.
1381 SkASSERT(renderer && renderer->numRenderSteps() == 1 && !renderer->emitsPrimitiveColor());
1382 }
1383
1384 #if defined(SK_DEBUG)
1385 // Renderers and their component RenderSteps have flexibility in defining their
1386 // DepthStencilSettings. However, the clipping and ordering managed between Device and ClipStack
1387 // requires that only GREATER or GEQUAL depth tests are used for draws recorded through the
1388 // client-facing, painters-order-oriented API. We assert here vs. in Renderer's constructor to
1389 // allow internal-oriented Renderers that are never selected for a "regular" draw call to have
1390 // more flexibility in their settings.
1391 SkASSERT(renderer);
1392 for (const RenderStep* step : renderer->steps()) {
1393 auto dss = step->depthStencilSettings();
1394 SkASSERT((!step->performsShading() || dss.fDepthTestEnabled) &&
1395 (!dss.fDepthTestEnabled ||
1396 dss.fDepthCompareOp == CompareOp::kGreater ||
1397 dss.fDepthCompareOp == CompareOp::kGEqual));
1398 }
1399 #endif
1400
1401 // Update the clip stack after issuing a flush (if it was needed). A draw will be recorded after
1402 // this point.
1403 DrawOrder order(fCurrentDepth.next());
1404 CompressedPaintersOrder clipOrder = fClip.updateClipStateForDraw(
1405 clip, clipElements, fColorDepthBoundsManager.get(), order.depth());
1406
1407 // A draw's order always depends on the clips that must be drawn before it
1408 order.dependsOnPaintersOrder(clipOrder);
1409 // If a draw is not opaque, it must be drawn after the most recent draw it intersects with in
1410 // order to blend correctly.
1411 if (rendererCoverage != Coverage::kNone || dependsOnDst) {
1412 CompressedPaintersOrder prevDraw =
1413 fColorDepthBoundsManager->getMostRecentDraw(clip.drawBounds());
1414 order.dependsOnPaintersOrder(prevDraw);
1415 }
1416
1417 // Now that the base paint order and draw bounds are finalized, if the Renderer relies on the
1418 // stencil attachment, we compute a secondary sorting field to allow disjoint draws to reorder
1419 // the RenderSteps across draws instead of in sequence for each draw.
1420 if (renderer->depthStencilFlags() & DepthStencilFlags::kStencil) {
1421 DisjointStencilIndex setIndex = fDisjointStencilSet->add(order.paintOrder(),
1422 clip.drawBounds());
1423 order.dependsOnStencil(setIndex);
1424 }
1425
1426 // TODO(b/330864257): This is an extra traversal of all paint effects, that can be avoided when
1427 // the paint key itself is determined inside this function.
1428 shading.notifyImagesInUse(fRecorder, fDC.get());
1429
1430 // If an atlas path renderer was chosen, then record a single CoverageMaskShape draw.
1431 // The shape will be scheduled to be rendered or uploaded into the atlas during the
1432 // next invocation of flushPendingWorkToRecorder().
1433 if (pathAtlas != nullptr) {
1434 // Record the draw as a fill since stroking is handled by the atlas render/upload.
1435 SkASSERT(atlasMask.has_value());
1436 auto [mask, origin] = *atlasMask;
1437 fDC->recordDraw(renderer, Transform::Translate(origin.fX, origin.fY), Geometry(mask),
1438 clip, order, &shading, nullptr);
1439 } else {
1440 if (styleType == SkStrokeRec::kStroke_Style ||
1441 styleType == SkStrokeRec::kHairline_Style ||
1442 styleType == SkStrokeRec::kStrokeAndFill_Style) {
1443 // For stroke-and-fill, 'renderer' is used for the fill and we always use the
1444 // TessellatedStrokes renderer; for stroke and hairline, 'renderer' is used.
1445 StrokeStyle stroke(style.getWidth(), style.getMiter(), style.getJoin(), style.getCap());
1446 fDC->recordDraw(styleType == SkStrokeRec::kStrokeAndFill_Style
1447 ? fRecorder->priv().rendererProvider()->tessellatedStrokes()
1448 : renderer,
1449 localToDevice, geometry, clip, order, &shading, &stroke);
1450 }
1451 if (styleType == SkStrokeRec::kFill_Style ||
1452 styleType == SkStrokeRec::kStrokeAndFill_Style) {
1453 // Possibly record an additional draw using the non-AA bounds renderer to fill the
1454 // interior with a renderer that can disable blending entirely.
1455 if (!innerFillBounds.isEmptyNegativeOrNaN()) {
1456 SkASSERT(!dependsOnDst && renderer->useNonAAInnerFill());
1457 DrawOrder orderWithoutCoverage{order.depth()};
1458 orderWithoutCoverage.dependsOnPaintersOrder(clipOrder);
1459 fDC->recordDraw(fRecorder->priv().rendererProvider()->nonAABounds(),
1460 localToDevice, Geometry(Shape(innerFillBounds)),
1461 clip, orderWithoutCoverage, &shading, nullptr);
1462 // Force the coverage draw to come after the non-AA draw in order to benefit from
1463 // early depth testing.
1464 order.dependsOnPaintersOrder(orderWithoutCoverage.paintOrder());
1465 }
1466 fDC->recordDraw(renderer, localToDevice, geometry, clip, order, &shading, nullptr);
1467 }
1468 }
1469
1470 // Post-draw book keeping (bounds manager, depth tracking, etc.)
1471 fColorDepthBoundsManager->recordDraw(clip.drawBounds(), order.paintOrder());
1472 fCurrentDepth = order.depth();
1473
1474 // TODO(b/238758897): When we enable layer elision that depends on draws not overlapping, we
1475 // can use the `getMostRecentDraw()` query to determine that, although that will mean querying
1476 // even if the draw does not depend on dst (so should be only be used when the Device is an
1477 // elision candidate).
1478 }
1479
// Records a depth-only draw that rasterizes a deferred clip shape into the depth attachment.
// Unlike drawGeometry(), there is no paint: the draw emits only Z values (null PaintParams) and
// is always a fill (null StrokeStyle).
void Device::drawClipShape(const Transform& localToDevice,
                           const Shape& shape,
                           const Clip& clip,
                           DrawOrder order) {
    // A clip draw's state is almost fully defined by the ClipStack. The only thing we need
    // to account for is selecting a Renderer and tracking the stencil buffer usage.
    Geometry geometry{shape};
    auto [renderer, pathAtlas] = this->chooseRenderer(localToDevice,
                                                      geometry,
                                                      DefaultFillStyle(),
                                                      /*requireMSAA=*/true);
    if (!renderer) {
        SKGPU_LOG_W("Skipping clip with no supported path renderer.");
        return;
    } else if (renderer->depthStencilFlags() & DepthStencilFlags::kStencil) {
        // Stencil-based renderers get a secondary sort key so that RenderSteps of disjoint
        // draws can be reordered across draws instead of staying interleaved per draw.
        DisjointStencilIndex setIndex = fDisjointStencilSet->add(order.paintOrder(),
                                                                 clip.drawBounds());
        order.dependsOnStencil(setIndex);
    }

    // This call represents one of the deferred clip shapes that's already pessimistically counted
    // in needsFlushBeforeDraw(), so the DrawContext should have room to add it.
    SkASSERT(fDC->pendingRenderSteps() + renderer->numRenderSteps() < DrawList::kMaxRenderSteps);

    // Anti-aliased clipping requires the renderer to use MSAA to modify the depth per sample, so
    // analytic coverage renderers cannot be used.
    SkASSERT(renderer->coverage() == Coverage::kNone && renderer->requiresMSAA());
    // requireMSAA=true above means chooseRenderer() never routes clip shapes to an atlas.
    SkASSERT(pathAtlas == nullptr);

    // Clips draws are depth-only (null PaintParams), and filled (null StrokeStyle).
    // TODO: Remove this CPU-transform once perspective is supported for all path renderers
    if (localToDevice.type() == Transform::Type::kPerspective) {
        // Pre-transform the shape into device space on the CPU and record it with an identity
        // transform so the chosen renderer never sees perspective.
        SkPath devicePath = geometry.shape().asPath();
        devicePath.transform(localToDevice.matrix().asM33());
        fDC->recordDraw(renderer, Transform::Identity(), Geometry(Shape(devicePath)), clip, order,
                        nullptr, nullptr);
    } else {
        fDC->recordDraw(renderer, localToDevice, geometry, clip, order, nullptr, nullptr);
    }
    // This ensures that draws recorded after this clip shape has been popped off the stack will
    // be unaffected by the Z value the clip shape wrote to the depth attachment.
    if (order.depth() > fCurrentDepth) {
        fCurrentDepth = order.depth();
    }
}
1525
// TODO: Currently all Renderers are always defined, but with config options and caps that may not
// be the case, in which case chooseRenderer() will have to go through compatible choices.
//
// Selects the Renderer (and possibly a PathAtlas) used to draw 'geometry' with 'style'.
// Return values:
//   {renderer, nullptr}  - draw directly with 'renderer'.
//   {nullptr,  atlas}    - render the shape through the returned PathAtlas as a coverage mask.
//   {nullptr,  nullptr}  - no compatible renderer exists for this geometry.
// 'requireMSAA' is set for draws that must modify depth per sample (e.g. clip shapes or the
// stroke pass of stroke-and-fill), which excludes analytic-coverage and atlas renderers.
std::pair<const Renderer*, PathAtlas*> Device::chooseRenderer(const Transform& localToDevice,
                                                              const Geometry& geometry,
                                                              const SkStrokeRec& style,
                                                              bool requireMSAA) const {
    const RendererProvider* renderers = fRecorder->priv().rendererProvider();
    SkASSERT(renderers);
    SkStrokeRec::Style type = style.getStyle();

    // Non-shape geometries map 1:1 to specialized renderers.
    if (geometry.isSubRun()) {
        SkASSERT(!requireMSAA);
        sktext::gpu::RendererData rendererData = geometry.subRunData().rendererData();
        if (!rendererData.isSDF) {
            return {renderers->bitmapText(rendererData.isLCD, rendererData.maskFormat), nullptr};
        }
        // Even though the SkPaint can request subpixel rendering, we still need to match
        // this with the pixel geometry.
        bool useLCD = rendererData.isLCD &&
                      geometry.subRunData().pixelGeometry() != kUnknown_SkPixelGeometry;
        return {renderers->sdfText(useLCD), nullptr};
    } else if (geometry.isVertices()) {
        SkVerticesPriv info(geometry.vertices()->priv());
        return {renderers->vertices(info.mode(), info.hasColors(), info.hasTexCoords()), nullptr};
    } else if (geometry.isCoverageMaskShape()) {
        // drawCoverageMask() passes in CoverageMaskShapes that reference a provided texture.
        // The CoverageMask renderer can also be chosen later on if the shape is assigned to
        // to be rendered into the PathAtlas, in which case the 2nd return value is non-null.
        return {renderers->coverageMask(), nullptr};
    } else if (geometry.isEdgeAAQuad()) {
        SkASSERT(!requireMSAA && style.isFillStyle());
        // handled by specialized system, simplified from rects and round rects
        const EdgeAAQuad& quad = geometry.edgeAAQuad();
        if (quad.isRect() && quad.edgeFlags() == EdgeAAQuad::Flags::kNone) {
            // For non-AA rectangular quads, it can always use a coverage-less renderer; there's no
            // need to check for pixel alignment to avoid popping if MSAA is turned on because quad
            // tile edges will seam with each in either mode.
            return {renderers->nonAABounds(), nullptr};
        } else {
            return {renderers->perEdgeAAQuad(), nullptr};
        }
    } else if (geometry.isAnalyticBlur()) {
        return {renderers->analyticBlur(), nullptr};
    } else if (!geometry.isShape()) {
        // We must account for new Geometry types with specific Renderers
        return {nullptr, nullptr};
    }

    const Shape& shape = geometry.shape();
    // We can't use this renderer if we require MSAA for an effect (i.e. clipping or stroke+fill).
    if (!requireMSAA && is_simple_shape(shape, type)) {
        // For pixel-aligned rects, use the the non-AA bounds renderer to avoid triggering any
        // dst-read requirement due to src blending.
        bool pixelAlignedRect = false;
        if (shape.isRect() && style.isFillStyle() &&
            localToDevice.type() <= Transform::Type::kRectStaysRect) {
            Rect devRect = localToDevice.mapRect(shape.rect());
            pixelAlignedRect = devRect.nearlyEquals(devRect.makeRound());
        }

        if (shape.isEmpty() || pixelAlignedRect) {
            // An empty shape is only drawable when inverse-filled (covers everything inside clip).
            SkASSERT(!shape.isEmpty() || shape.inverted());
            return {renderers->nonAABounds(), nullptr};
        } else {
            return {renderers->analyticRRect(), nullptr};
        }
    }

    // Circular arcs under a uniform-scale transform can use the specialized arc renderer.
    if (!requireMSAA && shape.isArc() &&
        SkScalarNearlyEqual(shape.arc().oval().width(), shape.arc().oval().height()) &&
        SkScalarAbs(shape.arc().sweepAngle()) < 360.f &&
        localToDevice.type() <= Transform::Type::kAffine) {
        float maxScale, minScale;
        std::tie(maxScale, minScale) = localToDevice.scaleFactors({0, 0});
        if (SkScalarNearlyEqual(maxScale, minScale)) {
            // Arc support depends on the style.
            SkStrokeRec::Style recStyle = style.getStyle();
            switch (recStyle) {
                case SkStrokeRec::kStrokeAndFill_Style:
                    // This produces a strange result that this op doesn't implement.
                    break;
                case SkStrokeRec::kFill_Style:
                    return {renderers->circularArc(), nullptr};
                case SkStrokeRec::kStroke_Style:
                case SkStrokeRec::kHairline_Style:
                    // Strokes that don't use the center point are supported with butt & round caps.
                    bool isWedge = shape.arc().isWedge();
                    bool isSquareCap = style.getCap() == SkPaint::kSquare_Cap;
                    if (!isWedge && !isSquareCap) {
                        return {renderers->circularArc(), nullptr};
                    }
                    break;
            }
        }
    }

    // Path rendering options. For now the strategy is very simple and not optimal:
    // I. Use tessellation if MSAA is required for an effect.
    // II: otherwise:
    //    1. Always use compute AA if supported unless it was excluded by ContextOptions or the
    //       compute renderer cannot render the shape efficiently yet (based on the result of
    //       `isSuitableForAtlasing`).
    //    2. Fall back to CPU raster AA if hardware MSAA is disabled or it was explicitly requested
    //       via ContextOptions.
    //    3. Otherwise use tessellation.
#if defined(GPU_TEST_UTILS)
    PathRendererStrategy strategy = fRecorder->priv().caps()->requestedPathRendererStrategy();
#else
    PathRendererStrategy strategy = PathRendererStrategy::kDefault;
#endif

    PathAtlas* pathAtlas = nullptr;
    AtlasProvider* atlasProvider = fRecorder->priv().atlasProvider();

    // Prefer compute atlas draws if supported. This currently implicitly filters out clip draws as
    // they require MSAA. Eventually we may want to route clip shapes to the atlas as well but not
    // if hardware MSAA is required.
    std::optional<Rect> drawBounds;
    if (atlasProvider->isAvailable(AtlasProvider::PathAtlasFlags::kCompute) &&
        use_compute_atlas_when_available(strategy)) {
        PathAtlas* atlas = fDC->getComputePathAtlas(fRecorder);
        SkASSERT(atlas);

        // Don't use the compute renderer if it can't handle the shape efficiently.
        //
        // Use the conservative clip bounds for a rough estimate of the mask size (this avoids
        // having to evaluate the entire clip stack before choosing the renderer as it will have to
        // get evaluated again if we fall back to a different renderer).
        drawBounds = localToDevice.mapRect(shape.bounds());
        if (atlas->isSuitableForAtlasing(*drawBounds, fClip.conservativeBounds())) {
            pathAtlas = atlas;
        }
    }

    // Fall back to CPU rendered paths when multisampling is disabled and the compute atlas is not
    // available.
    // TODO: enable other uses of the software path renderer
    if (!pathAtlas && atlasProvider->isAvailable(AtlasProvider::PathAtlasFlags::kRaster) &&
        (strategy == PathRendererStrategy::kRasterAA ||
         (strategy == PathRendererStrategy::kDefault && !fMSAASupported))) {
        // NOTE: RasterPathAtlas doesn't implement `PathAtlas::isSuitableForAtlasing` as it doesn't
        // reject paths (unlike ComputePathAtlas).
        pathAtlas = atlasProvider->getRasterPathAtlas();
    }

    if (!requireMSAA && pathAtlas) {
        // If we got here it means that we should draw with an atlas renderer if we can and avoid
        // resorting to one of the tessellating techniques.
        return {nullptr, pathAtlas};
    }

    // If we got here, it requires tessellated path rendering or an MSAA technique applied to a
    // simple shape (so we interpret them as paths to reduce the number of pipelines we need).

    // TODO: All shapes that select a tessellating path renderer need to be "pre-chopped" if they
    // are large enough to exceed the fixed count tessellation limits. Fills are pre-chopped to the
    // viewport bounds, strokes and stroke-and-fills are pre-chopped to the viewport bounds outset
    // by the stroke radius (hence taking the whole style and not just its type).

    if (type == SkStrokeRec::kStroke_Style ||
        type == SkStrokeRec::kHairline_Style) {
        // Unlike in Ganesh, the HW stroke tessellator can work with arbitrary paints since the
        // depth test prevents double-blending when there is transparency, thus we can HW stroke
        // any path regardless of its paint.
        // TODO: We treat inverse-filled strokes as regular strokes. We could handle them by
        // stenciling first with the HW stroke tessellator and then covering their bounds, but
        // inverse-filled strokes are not well-specified in our public canvas behavior so we may be
        // able to remove it.
        return {renderers->tessellatedStrokes(), nullptr};
    }

    // 'type' could be kStrokeAndFill, but in that case chooseRenderer() is meant to return the
    // fill renderer since tessellatedStrokes() will always be used for the stroke pass.
    if (shape.convex() && !shape.inverted()) {
        // TODO: Ganesh doesn't have a curve+middle-out triangles option for convex paths, but it
        // would be pretty trivial to spin up.
        return {renderers->convexTessellatedWedges(), nullptr};
    } else {
        if (!drawBounds.has_value()) {
            drawBounds = localToDevice.mapRect(shape.bounds());
        }
        drawBounds->intersect(fClip.conservativeBounds());
        const bool preferWedges =
                // If the draw bounds don't intersect with the clip stack's conservative bounds,
                // we'll be drawing a very small area at most, accounting for coverage, so just
                // stick with drawing wedges in that case.
                drawBounds->isEmptyNegativeOrNaN() ||

                // TODO: Combine this heuristic with what is used in PathStencilCoverOp to choose
                // between wedges curves consistently in Graphite and Ganesh.
                (shape.isPath() && shape.path().countVerbs() < 50) ||
                drawBounds->area() <= (256 * 256);

        if (preferWedges) {
            return {renderers->stencilTessellatedWedges(shape.fillType()), nullptr};
        } else {
            return {renderers->stencilTessellatedCurvesAndTris(shape.fillType()), nullptr};
        }
    }
}
1726
// Returns the DrawTask snapped by the most recent flush of this Device. Only meaningful for
// scratch devices, which store (rather than submit) their snapped task; see
// flushPendingWorkToRecorder().
sk_sp<Task> Device::lastDrawTask() const {
    SkASSERT(this->isScratchDevice());
    return fLastTask;
}
1731
// Flushes all pending recorded work into a snapped DrawTask. For scratch devices the task is
// stored on the Device (fLastTask); otherwise it is added to the Recorder's root task list.
// Re-entrant calls are guarded via fIsFlushing (see the TODO below).
void Device::flushPendingWorkToRecorder() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // If this is a scratch device being flushed, it should only be flushing into the expected
    // next recording from when the Device was first created.
    SkASSERT(fRecorder);
    SkASSERT(fScopedRecordingID == 0 || fScopedRecordingID == fRecorder->priv().nextRecordingID());

    // TODO(b/330864257): flushPendingWorkToRecorder() can be recursively called if this Device
    // recorded a picture shader draw and during a flush (triggered by snap or automatically from
    // reaching limits), the picture shader will be rendered to a new device. If that picture drawn
    // to the temporary device fills up an atlas it can trigger the global
    // recorder->flushTrackedDevices(), which will then encounter this device that is already in
    // the midst of flushing. To avoid crashing we only actually flush the first time this is called
    // and set a bit to early-out on any recursive calls.
    // This is not an ideal solution since the temporary Device's flush-the-world may have reset
    // atlas entries that the current Device's flushed draws will reference. But at this stage it's
    // not possible to split the already recorded draws into a before-list and an after-list that
    // can reference the old and new contents of the atlas. While avoiding the crash, this may cause
    // incorrect accesses to a shared atlas. Once paint data is extracted at draw time, picture
    // shaders will be resolved outside of flushes and then this will be fixed automatically.
    if (fIsFlushing) {
        return;
    } else {
        fIsFlushing = true;
    }

    this->internalFlush();
    sk_sp<Task> drawTask = fDC->snapDrawTask(fRecorder);
    if (this->isScratchDevice()) {
        // TODO(b/323887221): Once shared atlas resources are less brittle, scratch devices won't
        // flush to the recorder at all and will only store the snapped task here.
        fLastTask = drawTask;
    } else {
        // Non-scratch devices do not need to point back to the last snapped task since they are
        // always added to the root task list.
        // TODO: It is currently possible for scratch devices to be flushed and instantiated before
        // their work is finished, meaning they will produce additional tasks to be included in
        // a follow-up Recording: https://chat.google.com/room/AAAA2HlH94I/YU0XdFqX2Uw.
        // However, in this case they no longer appear scratch because the first Recording
        // instantiated the targets. When scratch devices are not actually registered with the
        // Recorder and are only included when they are drawn (e.g. restored), we should be able to
        // assert that `fLastTask` is null.
        fLastTask = nullptr;
    }

    if (drawTask) {
        fRecorder->priv().add(std::move(drawTask));

        // TODO(b/297344089): This always regenerates mipmaps on the draw target when it's drawn to.
        // This could be wasteful if we draw to a target multiple times before reading from it with
        // downscaling.
        if (fDC->target()->mipmapped() == Mipmapped::kYes) {
            if (!GenerateMipmaps(fRecorder, fDC->refTarget(), fDC->colorInfo())) {
                SKGPU_LOG_W("Device::flushPendingWorkToRecorder: Failed to generate mipmaps");
            }
        }
    }

    fIsFlushing = false;
}
1793
// Performs the flush work itself: records pending atlas uploads and deferred clip draws, flushes
// the DrawContext's accumulated draw list, and resets the per-flush ordering/depth state. Called
// from flushPendingWorkToRecorder(), which handles task snapping and re-entrancy.
void Device::internalFlush() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ASSERT_SINGLE_OWNER

    // Push any pending uploads from the atlas provider that pending draws reference.
    fRecorder->priv().atlasProvider()->recordUploads(fDC.get());

    // Clip shapes are depth-only draws, but aren't recorded in the DrawContext until a flush in
    // order to determine the Z values for each element.
    fClip.recordDeferredClipDraws();

    // Flush all pending items to the internal task list and reset Device tracking state
    fDC->flush(fRecorder);

    // The bounds manager, stencil set, and depth counter track ordering within a single flush, so
    // they restart from scratch for the next batch of draws.
    fColorDepthBoundsManager->reset();
    fDisjointStencilSet->reset();
    fCurrentDepth = DrawOrder::kClearDepth;

    // Any cleanup in the AtlasProvider
    fRecorder->priv().atlasProvider()->compact(/*forceCompact=*/false);
}
1815
needsFlushBeforeDraw(int numNewRenderSteps,DstReadRequirement dstReadReq) const1816 bool Device::needsFlushBeforeDraw(int numNewRenderSteps, DstReadRequirement dstReadReq) const {
1817 // Must also account for the elements in the clip stack that might need to be recorded.
1818 numNewRenderSteps += fClip.maxDeferredClipDraws() * Renderer::kMaxRenderSteps;
1819 return // Need flush if we don't have room to record into the current list.
1820 (DrawList::kMaxRenderSteps - fDC->pendingRenderSteps()) < numNewRenderSteps ||
1821 // Need flush if this draw needs to copy the dst surface for reading.
1822 dstReadReq == DstReadRequirement::kTextureCopy;
1823 }
1824
// Draws a special image (e.g. a snapped layer) positioned by 'localToDevice'. The image becomes
// the paint's shader via SkModifyPaintAndDstForDrawImageRect() and is drawn as an edge-AA quad
// whose AA flags follow the paint's anti-alias setting.
void Device::drawSpecial(SkSpecialImage* special,
                         const SkMatrix& localToDevice,
                         const SkSamplingOptions& sampling,
                         const SkPaint& paint,
                         SkCanvas::SrcRectConstraint constraint) {
    // Mask/image filters are expected to have been resolved by the caller before reaching here.
    SkASSERT(!paint.getMaskFilter() && !paint.getImageFilter());

    sk_sp<SkImage> img = special->asImage();
    if (!img || !as_IB(img)->isGraphiteBacked()) {
        SKGPU_LOG_W("Couldn't get Graphite-backed special image as image");
        return;
    }

    // Rewrite the paint to sample the image over the computed dst rect, honoring the strict
    // subset constraint when requested.
    SkPaint paintWithShader(paint);
    SkRect dst = SkModifyPaintAndDstForDrawImageRect(
            img.get(),
            sampling,
            /*src=*/SkRect::Make(special->subset()),
            /*dst=*/SkRect::MakeIWH(special->width(), special->height()),
            /*strictSrcSubset=*/constraint == SkCanvas::kStrict_SrcRectConstraint,
            &paintWithShader);
    if (dst.isEmpty()) {
        return;
    }

    // The image filtering and layer code paths often rely on the paint being non-AA to avoid
    // coverage operations. To stay consistent with the other backends, we use an edge AA "quad"
    // whose flags match the paint's AA request.
    EdgeAAQuad::Flags aaFlags = paint.isAntiAlias() ? EdgeAAQuad::Flags::kAll
                                                    : EdgeAAQuad::Flags::kNone;
    this->drawGeometry(Transform(SkM44(localToDevice)),
                       Geometry(EdgeAAQuad(dst, aaFlags)),
                       paintWithShader,
                       DefaultFillStyle(),
                       DrawFlags::kIgnorePathEffect);
}
1861
// Draws 'mask' as a coverage mask: the texture's alpha modulates the paint's coverage over a
// rect that tightly bounds the mask. 'localToDevice' positions the mask, while the device's
// current transform defines the local coords used to evaluate the paint.
void Device::drawCoverageMask(const SkSpecialImage* mask,
                              const SkMatrix& localToDevice,
                              const SkSamplingOptions& sampling,
                              const SkPaint& paint) {
    // Texel origin within the mask texture and the mask's dimensions, packed for the RenderStep.
    CoverageMaskShape::MaskInfo maskInfo{/*fTextureOrigin=*/{SkTo<uint16_t>(mask->subset().fLeft),
                                                             SkTo<uint16_t>(mask->subset().fTop)},
                                         /*fMaskSize=*/{SkTo<uint16_t>(mask->width()),
                                                        SkTo<uint16_t>(mask->height())}};

    auto maskProxyView = AsView(mask->asImage());
    if (!maskProxyView) {
        SKGPU_LOG_W("Couldn't get Graphite-backed special image as texture proxy view");
        return;
    }

    // Every other "Image" draw reaches the underlying texture via AddToKey/NotifyInUse, which
    // handles notifying the image and either flushing the linked surface or attaching draw tasks
    // from a scratch device to the current draw context. In this case, 'mask' is very likely to
    // be linked to a scratch device, but we must perform the same notifyInUse manually here because
    // the texture is consumed by the RenderStep and not part of the PaintParams.
    static_cast<Image_Base*>(mask->asImage().get())->notifyInUse(fRecorder, fDC.get());

    // 'mask' logically has 0 coverage outside of its pixels, which is equivalent to kDecal tiling.
    // However, since we draw geometry tightly fitting 'mask', we can use the better-supported
    // kClamp tiling and behave effectively the same way.
    TextureDataBlock::SampledTexture sampledMask{maskProxyView.refProxy(),
                                                 {SkFilterMode::kLinear, SkTileMode::kClamp}};
    // Ensure this is kept alive; normally textures are kept alive by the PipelineDataGatherer for
    // image shaders, or by the PathAtlas. This is a unique circumstance.
    // NOTE: CoverageMaskRenderStep controls the final sampling options; this texture data block
    // serves only to keep the mask alive so the sampling passed to add() doesn't matter.
    fRecorder->priv().textureDataCache()->insert(TextureDataBlock(sampledMask));

    // CoverageMaskShape() wraps a Shape when it's used as a PathAtlas, but in this case the
    // original shape has been long lost, so just use a Rect that bounds the image.
    CoverageMaskShape maskShape{Shape{Rect::WH((float)mask->width(), (float)mask->height())},
                                maskProxyView.proxy(),
                                // Use the active local-to-device transform for this since it
                                // determines the local coords for evaluating the skpaint, whereas
                                // the provided 'localToDevice' just places the coverage mask.
                                this->localToDeviceTransform().inverse(),
                                maskInfo};

    this->drawGeometry(Transform(SkM44(localToDevice)),
                       Geometry(maskShape),
                       paint,
                       DefaultFillStyle(),
                       DrawFlags::kIgnorePathEffect);
}
1911
// Creating a special image from a raster bitmap is not supported on Graphite devices; callers
// receive nullptr and must handle the failure.
sk_sp<SkSpecialImage> Device::makeSpecial(const SkBitmap&) {
    return nullptr;
}
1915
// Creating a special image from a generic SkImage is not supported on Graphite devices; callers
// receive nullptr and must handle the failure.
sk_sp<SkSpecialImage> Device::makeSpecial(const SkImage*) {
    return nullptr;
}
1919
// Produces a special image for 'subset' of this device's contents: either an actual pixel copy
// (when forced, or when the surface view is unusable) or a zero-copy view that wraps the device's
// target and relies on the caller's draw/restore ordering for correctness.
sk_sp<SkSpecialImage> Device::snapSpecial(const SkIRect& subset, bool forceCopy) {
    // NOTE: snapSpecial() can be called even after the device has been marked immutable (null
    // recorder), but in those cases it should not be a copy and just returns the image view.
    sk_sp<Image> deviceImage;
    SkIRect finalSubset;
    if (forceCopy || !this->readSurfaceView() || this->readSurfaceView().proxy()->isFullyLazy()) {
        // Copy path: the returned image owns its pixels, so the subset is relative to its origin.
        deviceImage = this->makeImageCopy(
                subset, Budgeted::kYes, Mipmapped::kNo, SkBackingFit::kApprox);
        finalSubset = SkIRect::MakeSize(subset.size());
    } else {
        // TODO(b/323886870): For now snapSpecial() force adds the pending work to the recorder's
        // root task list. Once shared atlas management is solved and DrawTasks can be nested in a
        // graph then this can go away in favor of auto-flushing through the image's linked device.
        if (fRecorder) {
            this->flushPendingWorkToRecorder();
        }
        deviceImage = Image::WrapDevice(sk_ref_sp(this));
        finalSubset = subset;
    }

    if (!deviceImage) {
        return nullptr;
    }

    // For non-copying "snapSpecial", the semantics are returning an image view of the surface data,
    // and relying on higher-level draw and restore logic for the contents to make sense.
    return SkSpecialImages::MakeGraphite(
            fRecorder, finalSubset, std::move(deviceImage), this->surfaceProps());
}
1949
createImageFilteringBackend(const SkSurfaceProps & surfaceProps,SkColorType colorType) const1950 sk_sp<skif::Backend> Device::createImageFilteringBackend(const SkSurfaceProps& surfaceProps,
1951 SkColorType colorType) const {
1952 return skif::MakeGraphiteBackend(fRecorder, surfaceProps, colorType);
1953 }
1954
target()1955 TextureProxy* Device::target() { return fDC->target(); }
1956
readSurfaceView() const1957 TextureProxyView Device::readSurfaceView() const { return fDC->readSurfaceView(); }
1958
isScratchDevice() const1959 bool Device::isScratchDevice() const {
1960 // Scratch device status is inferred from whether or not the Device's target is instantiated.
1961 // By default devices start out un-instantiated unless they are wrapping an existing backend
1962 // texture (definitely not a scratch scenario), or Surface explicitly instantiates the target
1963 // before returning to the client (not a scratch scenario).
1964 //
1965 // Scratch device targets are instantiated during the prepareResources() phase of
1966 // Recorder::snap(). Truly scratch devices that have gone out of scope as intended will have
1967 // already been destroyed at this point. Scratch devices that become longer-lived (linked to
1968 // a client-owned object) automatically transition to non-scratch usage.
1969 return !fDC->target()->isInstantiated() && !fDC->target()->isLazy();
1970 }
1971
convertGlyphRunListToSlug(const sktext::GlyphRunList & glyphRunList,const SkPaint & paint)1972 sk_sp<sktext::gpu::Slug> Device::convertGlyphRunListToSlug(const sktext::GlyphRunList& glyphRunList,
1973 const SkPaint& paint) {
1974 return sktext::gpu::SlugImpl::Make(this->localToDevice(),
1975 glyphRunList,
1976 paint,
1977 this->strikeDeviceInfo(),
1978 SkStrikeCache::GlobalStrikeCache());
1979 }
1980
drawSlug(SkCanvas * canvas,const sktext::gpu::Slug * slug,const SkPaint & paint)1981 void Device::drawSlug(SkCanvas* canvas, const sktext::gpu::Slug* slug, const SkPaint& paint) {
1982 auto slugImpl = static_cast<const sktext::gpu::SlugImpl*>(slug);
1983 slugImpl->subRuns()->draw(canvas, slugImpl->origin(), paint, slugImpl, this->atlasDelegate());
1984 }
1985
drawBlurredRRect(const SkRRect & rrect,const SkPaint & paint,float deviceSigma)1986 bool Device::drawBlurredRRect(const SkRRect& rrect, const SkPaint& paint, float deviceSigma) {
1987 SkStrokeRec style(paint);
1988 if (skgpu::BlurIsEffectivelyIdentity(deviceSigma)) {
1989 this->drawGeometry(this->localToDeviceTransform(),
1990 Geometry(rrect.isRect() ? Shape(rrect.rect()) : Shape(rrect)),
1991 paint,
1992 style);
1993 return true;
1994 }
1995
1996 std::optional<AnalyticBlurMask> analyticBlur = AnalyticBlurMask::Make(
1997 this->recorder(), this->localToDeviceTransform(), deviceSigma, rrect);
1998 if (!analyticBlur) {
1999 return false;
2000 }
2001
2002 this->drawGeometry(this->localToDeviceTransform(), Geometry(*analyticBlur), paint, style);
2003 return true;
2004 }
2005
2006 } // namespace skgpu::graphite
2007