1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 #include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"
8
9 #include "include/core/SkMatrix.h"
10 #include "include/core/SkPath.h"
11 #include "include/core/SkPoint.h"
12 #include "include/core/SkRefCnt.h"
13 #include "include/core/SkScalar.h"
14 #include "include/core/SkString.h"
15 #include "include/core/SkTypes.h"
16 #include "include/gpu/ganesh/GrRecordingContext.h"
17 #include "include/private/SkColorData.h"
18 #include "include/private/base/SkDebug.h"
19 #include "include/private/base/SkFloatingPoint.h"
20 #include "include/private/base/SkMath.h"
21 #include "include/private/base/SkTArray.h"
22 #include "include/private/base/SkTDArray.h"
23 #include "include/private/gpu/ganesh/GrTypesPriv.h"
24 #include "src/base/SkArenaAlloc.h"
25 #include "src/base/SkTLazy.h"
26 #include "src/core/SkGeometry.h"
27 #include "src/core/SkMatrixPriv.h"
28 #include "src/core/SkPathEnums.h"
29 #include "src/core/SkPathPriv.h"
30 #include "src/core/SkPointPriv.h"
31 #include "src/core/SkSLTypeShared.h"
32 #include "src/gpu/BufferWriter.h"
33 #include "src/gpu/KeyBuilder.h"
34 #include "src/gpu/ganesh/GrAppliedClip.h"
35 #include "src/gpu/ganesh/GrAuditTrail.h"
36 #include "src/gpu/ganesh/GrBuffer.h"
37 #include "src/gpu/ganesh/GrCaps.h"
38 #include "src/gpu/ganesh/GrDrawOpTest.h"
39 #include "src/gpu/ganesh/GrGeometryProcessor.h"
40 #include "src/gpu/ganesh/GrMeshDrawTarget.h"
41 #include "src/gpu/ganesh/GrOpFlushState.h"
42 #include "src/gpu/ganesh/GrPaint.h"
43 #include "src/gpu/ganesh/GrProcessorAnalysis.h"
44 #include "src/gpu/ganesh/GrProcessorSet.h"
45 #include "src/gpu/ganesh/GrProcessorUnitTest.h"
46 #include "src/gpu/ganesh/GrProgramInfo.h"
47 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
48 #include "src/gpu/ganesh/GrShaderCaps.h"
49 #include "src/gpu/ganesh/GrSimpleMesh.h"
50 #include "src/gpu/ganesh/GrStyle.h"
51 #include "src/gpu/ganesh/SurfaceDrawContext.h"
52 #include "src/gpu/ganesh/geometry/GrPathUtils.h"
53 #include "src/gpu/ganesh/geometry/GrStyledShape.h"
54 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
55 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
56 #include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
57 #include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
58 #include "src/gpu/ganesh/ops/GrOp.h"
59 #include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
60
61 #if defined(GPU_TEST_UTILS)
62 #include "src/base/SkRandom.h"
63 #include "src/gpu/ganesh/GrTestUtils.h"
64 #endif
65
66 #include <cstddef>
67 #include <cstdint>
68 #include <memory>
69 #include <utility>
70
71 class GrDstProxyView;
72 class GrGLSLProgramDataManager;
73 class GrGLSLUniformHandler;
74 class GrSurfaceProxyView;
75 enum class GrXferBarrierFlags;
76 struct GrUserStencilSettings;
77 struct SkRect;
78
79 using namespace skia_private;
80
81 namespace skgpu::ganesh {
82
83 namespace {
84
// One edge of the convex polygon approximation: either a line or a quadratic
// Bezier. Points are stored without the segment's start point (that is the
// previous segment's end point), so a line carries 1 point and a quad 2.
struct Segment {
    enum {
        kLine = 0,
        kQuad = 1,
    } fType;
    // These enum values are assumed in member functions below.
    static_assert(0 == kLine && 1 == kQuad);

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // is the corner where the previous segment meets this segment
    // sharp. If so, fMid is a normalized bisector facing outward.
    SkVector fMid;

    // Number of stored points: kLine == 0 -> 1 point, kQuad == 1 -> 2 points.
    int countPoints() const {
        SkASSERT(fType == kLine || fType == kQuad);
        return fPts[fType], fType + 1;
    }
    // Last stored point, i.e. where this segment ends.
    const SkPoint& endPt() const {
        SkASSERT(fType == kLine || fType == kQuad);
        return fPts[fType];
    }
    // Outward normal at the segment's end point.
    const SkPoint& endNorm() const {
        SkASSERT(fType == kLine || fType == kQuad);
        return fNorms[fType];
    }
};
114
115 typedef TArray<Segment, true> SegmentArray;
116
center_of_mass(const SegmentArray & segments,SkPoint * c)117 bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
118 SkScalar area = 0;
119 SkPoint center = {0, 0};
120 int count = segments.size();
121 if (count <= 0) {
122 return false;
123 }
124 SkPoint p0 = {0, 0};
125 if (count > 2) {
126 // We translate the polygon so that the first point is at the origin.
127 // This avoids some precision issues with small area polygons far away
128 // from the origin.
129 p0 = segments[0].endPt();
130 SkPoint pi;
131 SkPoint pj;
132 // the first and last iteration of the below loop would compute
133 // zeros since the starting / ending point is (0,0). So instead we start
134 // at i=1 and make the last iteration i=count-2.
135 pj = segments[1].endPt() - p0;
136 for (int i = 1; i < count - 1; ++i) {
137 pi = pj;
138 pj = segments[i + 1].endPt() - p0;
139
140 SkScalar t = SkPoint::CrossProduct(pi, pj);
141 area += t;
142 center.fX += (pi.fX + pj.fX) * t;
143 center.fY += (pi.fY + pj.fY) * t;
144 }
145 }
146
147 // If the poly has no area then we instead return the average of
148 // its points.
149 if (SkScalarNearlyZero(area)) {
150 SkPoint avg;
151 avg.set(0, 0);
152 for (int i = 0; i < count; ++i) {
153 const SkPoint& pt = segments[i].endPt();
154 avg.fX += pt.fX;
155 avg.fY += pt.fY;
156 }
157 SkScalar denom = SK_Scalar1 / count;
158 avg.scale(denom);
159 *c = avg;
160 } else {
161 area *= 3;
162 area = SkScalarInvert(area);
163 center.scale(area);
164 // undo the translate of p0 to the origin.
165 *c = center + p0;
166 }
167 return !SkIsNaN(c->fX) && !SkIsNaN(c->fY) && c->isFinite();
168 }
169
compute_vectors(SegmentArray * segments,SkPoint * fanPt,SkPathFirstDirection dir,int * vCount,int * iCount)170 bool compute_vectors(SegmentArray* segments,
171 SkPoint* fanPt,
172 SkPathFirstDirection dir,
173 int* vCount,
174 int* iCount) {
175 if (!center_of_mass(*segments, fanPt)) {
176 return false;
177 }
178 int count = segments->size();
179
180 // Make the normals point towards the outside
181 SkPointPriv::Side normSide;
182 if (dir == SkPathFirstDirection::kCCW) {
183 normSide = SkPointPriv::kRight_Side;
184 } else {
185 normSide = SkPointPriv::kLeft_Side;
186 }
187
188 int64_t vCount64 = 0;
189 int64_t iCount64 = 0;
190 // compute normals at all points
191 for (int a = 0; a < count; ++a) {
192 Segment& sega = (*segments)[a];
193 int b = (a + 1) % count;
194 Segment& segb = (*segments)[b];
195
196 const SkPoint* prevPt = &sega.endPt();
197 int n = segb.countPoints();
198 for (int p = 0; p < n; ++p) {
199 segb.fNorms[p] = segb.fPts[p] - *prevPt;
200 segb.fNorms[p].normalize();
201 segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
202 prevPt = &segb.fPts[p];
203 }
204 if (Segment::kLine == segb.fType) {
205 vCount64 += 5;
206 iCount64 += 9;
207 } else {
208 vCount64 += 6;
209 iCount64 += 12;
210 }
211 }
212
213 // compute mid-vectors where segments meet. TODO: Detect shallow corners
214 // and leave out the wedges and close gaps by stitching segments together.
215 for (int a = 0; a < count; ++a) {
216 const Segment& sega = (*segments)[a];
217 int b = (a + 1) % count;
218 Segment& segb = (*segments)[b];
219 segb.fMid = segb.fNorms[0] + sega.endNorm();
220 segb.fMid.normalize();
221 // corner wedges
222 vCount64 += 4;
223 iCount64 += 6;
224 }
225 if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
226 return false;
227 }
228 *vCount = vCount64;
229 *iCount = iCount64;
230 return true;
231 }
232
// Incremental test for whether all points seen so far are (nearly) coincident
// or (nearly) collinear; such paths have ~zero area and are not drawn.
struct DegenerateTestData {
    bool isDegenerate() const { return Stage::kNonDegenerate != fStage; }
    enum class Stage {
        kInitial,        // no points seen yet
        kPoint,          // all points so far coincide with fFirstPoint
        kLine,           // all points so far lie on the line (fLineNormal, fLineC)
        kNonDegenerate,  // a point strayed off the line; path has real area
    };
    Stage fStage = Stage::kInitial;
    SkPoint fFirstPoint;   // first point seen (valid from kPoint onward)
    SkVector fLineNormal;  // unit normal of the candidate line (valid in kLine)
    SkScalar fLineC;       // line offset: fLineNormal.dot(p) + fLineC == 0 on the line
};
246
247 static const SkScalar kClose = (SK_Scalar1 / 16);
248 static const SkScalar kCloseSqd = kClose * kClose;
249
update_degenerate_test(DegenerateTestData * data,const SkPoint & pt)250 void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
251 switch (data->fStage) {
252 case DegenerateTestData::Stage::kInitial:
253 data->fFirstPoint = pt;
254 data->fStage = DegenerateTestData::Stage::kPoint;
255 break;
256 case DegenerateTestData::Stage::kPoint:
257 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
258 data->fLineNormal = pt - data->fFirstPoint;
259 data->fLineNormal.normalize();
260 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
261 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
262 data->fStage = DegenerateTestData::Stage::kLine;
263 }
264 break;
265 case DegenerateTestData::Stage::kLine:
266 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
267 data->fStage = DegenerateTestData::Stage::kNonDegenerate;
268 }
269 break;
270 case DegenerateTestData::Stage::kNonDegenerate:
271 break;
272 default:
273 SK_ABORT("Unexpected degenerate test stage.");
274 }
275 }
276
get_direction(const SkPath & path,const SkMatrix & m,SkPathFirstDirection * dir)277 inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
278 // At this point, we've already returned true from canDraw(), which checked that the path's
279 // direction could be determined, so this should just be fetching the cached direction.
280 // However, if perspective is involved, we're operating on a transformed path, which may no
281 // longer have a computable direction.
282 *dir = SkPathPriv::ComputeFirstDirection(path);
283 if (*dir == SkPathFirstDirection::kUnknown) {
284 return false;
285 }
286
287 // check whether m reverses the orientation
288 SkASSERT(!m.hasPerspective());
289 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
290 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
291 if (det2x2 < 0) {
292 *dir = SkPathPriv::OppositeFirstDirection(*dir);
293 }
294
295 return true;
296 }
297
add_line_to_segment(const SkPoint & pt,SegmentArray * segments)298 inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
299 segments->push_back();
300 segments->back().fType = Segment::kLine;
301 segments->back().fPts[0] = pt;
302 }
303
add_quad_segment(const SkPoint pts[3],SegmentArray * segments)304 inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
305 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
306 if (pts[0] != pts[2]) {
307 add_line_to_segment(pts[2], segments);
308 }
309 } else {
310 segments->push_back();
311 segments->back().fType = Segment::kQuad;
312 segments->back().fPts[0] = pts[1];
313 segments->back().fPts[1] = pts[2];
314 }
315 }
316
add_cubic_segments(const SkPoint pts[4],SkPathFirstDirection dir,SegmentArray * segments)317 inline void add_cubic_segments(const SkPoint pts[4],
318 SkPathFirstDirection dir,
319 SegmentArray* segments) {
320 STArray<15, SkPoint, true> quads;
321 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
322 int count = quads.size();
323 for (int q = 0; q < count; q += 3) {
324 add_quad_segment(&quads[q], segments);
325 }
326 }
327
// Walks the path (with closing), mapping points through m, and builds the
// segment list plus the fan point and vertex/index counts. Returns false if
// the path is degenerate (zero area), its direction is unknown, or the counts
// overflow.
bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    // pts[0] was already mapped as the previous verb's endpoint;
                    // only the new endpoint needs mapping.
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    // Approximate the conic as a sequence of quads; the quads
                    // share endpoints, so consecutive quads start 2 points apart.
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                // kClose_Verb: the iterator was constructed with forceClose, so
                // closing edges are emitted as line verbs; nothing to do here.
                break;
        }
    }
}
407
// Vertex/index counts for a single mesh draw. The geometry is split into
// multiple Draws when 16-bit index values would otherwise overflow.
struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};
413
414 typedef TArray<Draw, true> DrawArray;
415
// Emits the anti-aliased geometry for the segment list: for each segment a
// corner wedge (4 verts), then either a degenerate-quad line edge (5 verts) or
// a curved quad edge (6 verts), plus interior fan triangles to fanPt. Each
// vertex is Position, Color, UV, D0, D1 (see QuadEdgeEffect). Draws are split
// whenever 16-bit indices would overflow; the resulting counts go into *draws.
void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const VertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.size();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw; indices restart at 0 relative to its vertices.
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };

        // FIXME: These tris are inset in the 1 unit arc around the corner
        //
        // Corner wedge: the shared corner point plus three points pushed out
        // along the previous normal, the bisector, and the next normal.
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0 << color << SkPoint{0, 0} << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            // Fan point, the two edge endpoints, and the endpoints pushed out
            // one unit along the edge normal.
            verts << fanPt << color << SkPoint{0, dist} << negOneDists;
            verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
            verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            // Edge-plane constants: fNorms[k].dot(p) - cK is the signed distance
            // of p from the line through the quad's endpoint k.
            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV in cpu memory and then copy them to the gpu
            // buffer. If we write the position first into the gpu buffer then calculate the UVs, it
            // will cause us to read from the GPU buffer which can be very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value that is very large that it won't effect results if it is
            // interpolated with. However, the value can't be too large of a negative that it
            // effects numerical precision on less powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
585
586 ///////////////////////////////////////////////////////////////////////////////
587
588 /*
589 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
590 * two components of the vertex attribute. Coverage is based on signed
591 * distance with negative being inside, positive outside. The edge is specified in
592 * window space (y-down). If either the third or fourth component of the interpolated
593 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
594 * attempt to trim to a portion of the infinite quad.
595 * Requires shader derivative instruction support.
596 */
597
// Geometry processor that evaluates anti-aliased coverage for the convex-path
// geometry built above. Vertices carry position, color, and a float4 quad-edge
// attribute (u, v, d0, d1); see the block comment above for the shading model.
class QuadEdgeEffect : public GrGeometryProcessor {
public:
    // Arena-allocates an instance; ownership stays with the arena.
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    // The shader varies only with local-coord usage and the local matrix type,
    // so those are the only key inputs.
    void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
        // The three attributes below are laid out contiguously starting at
        // fInPosition, so their declaration order matters.
        this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};
645
// Builds the SkSL program: passes position/color/quad-edge attributes through,
// then computes coverage in the fragment shader from the implicit quad
// 0 = u^2 - v using screen-space derivatives, clamped by the d0/d1 edge planes.
std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            // Re-uploads the local matrix uniform only when it changed.
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(SkSLType::kFloat4);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            // Gradient of f(u,v) = u^2 - v, used to convert the implicit value
            // into an approximate signed distance.
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     "               half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}
719
GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect)720 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect)
721
722 #if defined(GPU_TEST_UTILS)
723 GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
724 SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
725 bool usesLocalCoords = d->fRandom->nextBool();
726 bool wideColor = d->fRandom->nextBool();
727 // Doesn't work without derivative instructions.
728 return d->caps()->shaderCaps()->fShaderDerivativeSupport
729 ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
730 : nullptr;
731 }
732 #endif
733
734 class AAConvexPathOp final : public GrMeshDrawOp {
735 private:
736 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
737
738 public:
739 DEFINE_OP_CLASS_ID
740
Make(GrRecordingContext * context,GrPaint && paint,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)741 static GrOp::Owner Make(GrRecordingContext* context,
742 GrPaint&& paint,
743 const SkMatrix& viewMatrix,
744 const SkPath& path,
745 const GrUserStencilSettings* stencilSettings) {
746 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
747 stencilSettings);
748 }
749
AAConvexPathOp(GrProcessorSet * processorSet,const SkPMColor4f & color,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)750 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
751 const SkMatrix& viewMatrix, const SkPath& path,
752 const GrUserStencilSettings* stencilSettings)
753 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
754 fPaths.emplace_back(PathData{viewMatrix, path, color});
755 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
756 IsHairline::kNo);
757 }
758
name() const759 const char* name() const override { return "AAConvexPathOp"; }
760
visitProxies(const GrVisitProxyFunc & func) const761 void visitProxies(const GrVisitProxyFunc& func) const override {
762 if (fProgramInfo) {
763 fProgramInfo->visitFPProxies(func);
764 } else {
765 fHelper.visitProxies(func);
766 }
767 }
768
fixedFunctionFlags() const769 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
770
finalize(const GrCaps & caps,const GrAppliedClip * clip,GrClampType clampType)771 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
772 GrClampType clampType) override {
773 return fHelper.finalizeProcessors(
774 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
775 &fPaths.back().fColor, &fWideColor);
776 }
777
778 private:
programInfo()779 GrProgramInfo* programInfo() override { return fProgramInfo; }
780
onCreateProgramInfo(const GrCaps * caps,SkArenaAlloc * arena,const GrSurfaceProxyView & writeView,bool usesMSAASurface,GrAppliedClip && appliedClip,const GrDstProxyView & dstProxyView,GrXferBarrierFlags renderPassXferBarriers,GrLoadOp colorLoadOp)781 void onCreateProgramInfo(const GrCaps* caps,
782 SkArenaAlloc* arena,
783 const GrSurfaceProxyView& writeView,
784 bool usesMSAASurface,
785 GrAppliedClip&& appliedClip,
786 const GrDstProxyView& dstProxyView,
787 GrXferBarrierFlags renderPassXferBarriers,
788 GrLoadOp colorLoadOp) override {
789 SkMatrix invert;
790 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
791 return;
792 }
793
794 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
795 fHelper.usesLocalCoords(),
796 fWideColor);
797
798 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
799 std::move(appliedClip),
800 dstProxyView, quadProcessor,
801 GrPrimitiveType::kTriangles,
802 renderPassXferBarriers, colorLoadOp);
803 }
804
onPrepareDraws(GrMeshDrawTarget * target)805 void onPrepareDraws(GrMeshDrawTarget* target) override {
806 int instanceCount = fPaths.size();
807
808 if (!fProgramInfo) {
809 this->createProgramInfo(target);
810 if (!fProgramInfo) {
811 return;
812 }
813 }
814
815 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
816
817 fDraws.reserve(instanceCount);
818
819 // TODO generate all segments for all paths and use one vertex buffer
820 for (int i = 0; i < instanceCount; i++) {
821 const PathData& args = fPaths[i];
822
823 // We use the fact that SkPath::transform path does subdivision based on
824 // perspective. Otherwise, we apply the view matrix when copying to the
825 // segment representation.
826 const SkMatrix* viewMatrix = &args.fViewMatrix;
827
828 // We avoid initializing the path unless we have to
829 const SkPath* pathPtr = &args.fPath;
830 SkTLazy<SkPath> tmpPath;
831 if (viewMatrix->hasPerspective()) {
832 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
833 tmpPathPtr->setIsVolatile(true);
834 tmpPathPtr->transform(*viewMatrix);
835 viewMatrix = &SkMatrix::I();
836 pathPtr = tmpPathPtr;
837 }
838
839 int vertexCount;
840 int indexCount;
841 static constexpr size_t kPreallocSegmentCnt = 512 / sizeof(Segment);
842 static constexpr size_t kPreallocDrawCnt = 4;
843
844 STArray<kPreallocSegmentCnt, Segment, true> segments;
845 SkPoint fanPt;
846
847 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
848 &indexCount)) {
849 continue;
850 }
851
852 sk_sp<const GrBuffer> vertexBuffer;
853 int firstVertex;
854
855 VertexWriter verts = target->makeVertexWriter(kVertexStride,
856 vertexCount,
857 &vertexBuffer,
858 &firstVertex);
859
860 if (!verts) {
861 SkDebugf("Could not allocate vertices\n");
862 return;
863 }
864
865 sk_sp<const GrBuffer> indexBuffer;
866 int firstIndex;
867
868 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
869 if (!idxs) {
870 SkDebugf("Could not allocate indices\n");
871 return;
872 }
873
874 STArray<kPreallocDrawCnt, Draw, true> draws;
875 VertexColor color(args.fColor, fWideColor);
876 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
877
878 GrSimpleMesh* meshes = target->allocMeshes(draws.size());
879 for (int j = 0; j < draws.size(); ++j) {
880 const Draw& draw = draws[j];
881 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
882 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
883 firstVertex);
884 firstIndex += draw.fIndexCnt;
885 firstVertex += draw.fVertexCnt;
886 }
887
888 fDraws.push_back({ meshes, draws.size() });
889 }
890 }
891
onExecute(GrOpFlushState * flushState,const SkRect & chainBounds)892 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
893 if (!fProgramInfo || fDraws.empty()) {
894 return;
895 }
896
897 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
898 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
899 for (int i = 0; i < fDraws.size(); ++i) {
900 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
901 flushState->drawMesh(fDraws[i].fMeshes[j]);
902 }
903 }
904 }
905
onCombineIfPossible(GrOp * t,SkArenaAlloc *,const GrCaps & caps)906 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
907 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
908 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
909 return CombineResult::kCannotCombine;
910 }
911 if (fHelper.usesLocalCoords() &&
912 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
913 return CombineResult::kCannotCombine;
914 }
915
916 fPaths.push_back_n(that->fPaths.size(), that->fPaths.begin());
917 fWideColor |= that->fWideColor;
918 return CombineResult::kMerged;
919 }
920
921 #if defined(GPU_TEST_UTILS)
onDumpInfo() const922 SkString onDumpInfo() const override {
923 return SkStringPrintf("Count: %d\n%s", fPaths.size(), fHelper.dumpInfo().c_str());
924 }
925 #endif
926
927 struct PathData {
928 SkMatrix fViewMatrix;
929 SkPath fPath;
930 SkPMColor4f fColor;
931 };
932
933 Helper fHelper;
934 STArray<1, PathData, true> fPaths;
935 bool fWideColor;
936
937 struct MeshDraw {
938 GrSimpleMesh* fMeshes;
939 int fMeshCount;
940 };
941
942 SkTDArray<MeshDraw> fDraws;
943 GrProgramInfo* fProgramInfo = nullptr;
944
945 using INHERITED = GrMeshDrawOp;
946 };
947
948 } // anonymous namespace
949
950 ///////////////////////////////////////////////////////////////////////////////
951
onCanDrawPath(const CanDrawPathArgs & args) const952 PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
953 // This check requires convexity and known direction, since the direction is used to build
954 // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
955 if (args.fCaps->shaderCaps()->fShaderDerivativeSupport &&
956 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
957 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
958 args.fShape->knownDirection()) {
959 return CanDrawPath::kYes;
960 }
961 return CanDrawPath::kNo;
962 }
963
// Converts the shape to a path, wraps it in an AAConvexPathOp, and records the
// op on the surface draw context. Always succeeds (returns true).
bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "AAConvexPathRenderer::onDrawPath");
    // This renderer is coverage-based, so it only runs on non-MSAA targets.
    SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                          *args.fViewMatrix,
                                          path, args.fUserStencilSettings);
    args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
    return true;
}
979
980 } // namespace skgpu::ganesh
981
982 #if defined(GPU_TEST_UTILS)
983
// Test-only factory: creates an AAConvexPathOp with a random invertible
// matrix, a random convex path, and random stencil settings.
GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return skgpu::ganesh::AAConvexPathOp::Make(
            context, std::move(paint), viewMatrix, path, stencilSettings);
}
991
992 #endif
993