/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"

#include "include/core/SkString.h"
#include "include/core/SkTypes.h"
#include "src/core/SkGeometry.h"
#include "src/core/SkMatrixPriv.h"
#include "src/core/SkPathPriv.h"
#include "src/core/SkPointPriv.h"
#include "src/gpu/BufferWriter.h"
#include "src/gpu/KeyBuilder.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDrawOpTest.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrProcessor.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/geometry/GrPathUtils.h"
#include "src/gpu/ganesh/geometry/GrStyledShape.h"
#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
#include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
#include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
#include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
#include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
#include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"

namespace skgpu::v1 {

namespace {

struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // If the corner where the previous segment meets this segment is sharp,
    // fMid is a normalized bisector facing outward.
    SkVector fMid;

    int countPoints() {
        static_assert(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    const SkPoint& endPt() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    const SkPoint& endNorm() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef SkTArray<Segment, true> SegmentArray;

bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.count();
    if (count <= 0) {
        return false;
    }
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the below loop would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
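        // The loop accumulates the standard polygon-centroid sums: with
        // t_i = cross(p_i, p_{i+1}), the signed area is A = (1/2) * sum(t_i) and the
        // centroid is C = (1/(6A)) * sum((p_i + p_{i+1}) * t_i). 'area' accumulates
        // sum(t_i) (i.e. 2A), which is why it is multiplied by 3 further down to form
        // the 6A divisor.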
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
}

bool compute_vectors(SegmentArray* segments,
                     SkPoint* fanPt,
                     SkPathFirstDirection dir,
                     int* vCount,
                     int* iCount) {
    if (!center_of_mass(*segments, fanPt)) {
        return false;
    }
    int count = segments->count();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathFirstDirection::kCCW) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    int64_t vCount64 = 0;
    int64_t iCount64 = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
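        // Per-segment vertex/index budget (an upper bound on what create_vertices()
        // emits): a line edge uses 5 verts and 6 + 3 (interior fan) indices, a quad
        // edge uses 6 verts and 9 + 3 indices. The 4 verts / 6 indices per corner
        // wedge are counted in the loop below.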
        if (Segment::kLine == segb.fType) {
            vCount64 += 5;
            iCount64 += 9;
        } else {
            vCount64 += 6;
            iCount64 += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        vCount64 += 4;
        iCount64 += 6;
    }
    if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
        return false;
    }
    *vCount = vCount64;
    *iCount = iCount64;
    return true;
}

struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    }           fStage;
    SkPoint     fFirstPoint;
    SkVector    fLineNormal;
    SkScalar    fLineC;
};

static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;

void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
            break;
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SK_ABORT("Unexpected degenerate test stage.");
    }
}

inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
    // At this point, we've already returned true from canDraw(), which checked that the path's
    // direction could be determined, so this should just be fetching the cached direction.
    // However, if perspective is involved, we're operating on a transformed path, which may no
    // longer have a computable direction.
    *dir = SkPathPriv::ComputeFirstDirection(path);
    if (*dir == SkPathFirstDirection::kUnknown) {
        return false;
    }

    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX)  * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }

    return true;
}

inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
    segments->push_back();
    segments->back().fType = Segment::kLine;
    segments->back().fPts[0] = pt;
}

inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
    if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
        if (pts[0] != pts[2]) {
            add_line_to_segment(pts[2], segments);
        }
    } else {
        segments->push_back();
        segments->back().fType = Segment::kQuad;
        segments->back().fPts[0] = pts[1];
        segments->back().fPts[1] = pts[2];
    }
}

inline void add_cubic_segments(const SkPoint pts[4],
                               SkPathFirstDirection dir,
                               SegmentArray* segments) {
    SkSTArray<15, SkPoint, true> quads;
    GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    int count = quads.count();
    for (int q = 0; q < count; q += 3) {
        add_quad_segment(&quads[q], segments);
    }
}

bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}

struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;

void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const VertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }
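        // Index values written below are relative to the first vertex of the current Draw,
        // so each Draw is limited to 2^16 vertices; when that would overflow we start a new
        // Draw, and onPrepareDraws() later emits one mesh per Draw with its own base vertex.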

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
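        // The last two interpolants (D0, D1) are the "outside the edge" trim values used by
        // QuadEdgeEffect; -1 keeps that test (z > 0 && w > 0) from ever firing for these verts.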

        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0                    << color << SkPoint{0, 0}           << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid)      << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
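            // With u == 0 the shader's implicit function u^2 - v reduces to -v, so its
            // coverage expression saturate(0.5 - (u^2 - v)/|gF|) becomes
            // saturate(0.5 + v/|grad v|): 0.5 on the edge with a roughly one-pixel-wide
            // linear falloff centered on it.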
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            verts << fanPt                    << color << SkPoint{0, dist}        << negOneDists;
            verts << v1Pos                    << color << SkPoint{0, 0}           << negOneDists;
            verts << v2Pos                    << color << SkPoint{0, 0}           << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV space in CPU memory and then copy them to
            // the GPU buffer. If we wrote the positions into the GPU buffer first and then
            // computed the UVs, we would end up reading back from the GPU buffer, which can be
            // very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value that is large enough that it won't affect results when
            // interpolated with, but not so large that it hurts numerical precision on less
            // powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is specified in
 * window space (y-down). If either the third or fourth component of the interpolated
 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
 * attempt to trim to a portion of the infinite quad.
 * Requires shader derivative instruction support.
 */
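/*
 * The fragment shader approximates that signed distance with a first-order expansion
 * of the implicit function f(u,v) = u^2 - v:
 *
 *     d(x,y) ~= f / |grad f|,   grad f = (2u*du/dx - dv/dx, 2u*du/dy - dv/dy)
 *
 * where the screen-space derivatives of (u,v) come from dFdx/dFdy; 'gF' built in
 * onEmitCode() below is exactly this gradient, and coverage is saturate(0.5 - d).
 */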

class QuadEdgeEffect : public GrGeometryProcessor {
public:
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
        this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};

std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(SkSLType::kFloat4);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
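            // (dFdx/dFdy results are undefined under non-uniform control flow in GLSL, so the
            // derivatives are computed unconditionally here and only consumed inside the branch.)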
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("} else {");
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     "                 half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}

GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);

#if GR_TEST_UTILS
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
    bool usesLocalCoords = d->fRandom->nextBool();
    bool wideColor = d->fRandom->nextBool();
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
                   : nullptr;
}
#endif

class AAConvexPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID

    static GrOp::Owner Make(GrRecordingContext* context,
                            GrPaint&& paint,
                            const SkMatrix& viewMatrix,
                            const SkPath& path,
                            const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
                                                     stencilSettings);
    }

    AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
                   const SkMatrix& viewMatrix, const SkPath& path,
                   const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
        fPaths.emplace_back(PathData{viewMatrix, path, color});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
                                   IsHairline::kNo);
    }

    const char* name() const override { return "AAConvexPathOp"; }

    void visitProxies(const GrVisitProxyFunc& func) const override {
        if (fProgramInfo) {
            fProgramInfo->visitFPProxies(func);
        } else {
            fHelper.visitProxies(func);
        }
    }

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
                                      GrClampType clampType) override {
        return fHelper.finalizeProcessors(
                caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
                &fPaths.back().fColor, &fWideColor);
    }

private:
    GrProgramInfo* programInfo() override { return fProgramInfo; }

    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView& writeView,
                             bool usesMSAASurface,
                             GrAppliedClip&& appliedClip,
                             const GrDstProxyView& dstProxyView,
                             GrXferBarrierFlags renderPassXferBarriers,
                             GrLoadOp colorLoadOp) override {
        SkMatrix invert;
        if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
            return;
        }

        GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
                                                                  fHelper.usesLocalCoords(),
                                                                  fWideColor);

        fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
                                                            std::move(appliedClip),
                                                            dstProxyView, quadProcessor,
                                                            GrPrimitiveType::kTriangles,
                                                            renderPassXferBarriers, colorLoadOp);
    }

    void onPrepareDraws(GrMeshDrawTarget* target) override {
        int instanceCount = fPaths.count();

        if (!fProgramInfo) {
            this->createProgramInfo(target);
            if (!fProgramInfo) {
                return;
            }
        }

        const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();

        fDraws.reserve(instanceCount);

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We rely on the fact that SkPath::transform does subdivision based on
            // perspective. Otherwise, we apply the view matrix when copying points into the
            // segment representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            sk_sp<const GrBuffer> vertexBuffer;
            int firstVertex;

            VertexWriter verts = target->makeVertexWriter(kVertexStride,
                                                          vertexCount,
                                                          &vertexBuffer,
                                                          &firstVertex);

            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            sk_sp<const GrBuffer> indexBuffer;
            int firstIndex;

            uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            SkSTArray<kPreallocDrawCnt, Draw, true> draws;
            VertexColor color(args.fColor, fWideColor);
            create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);

            GrSimpleMesh* meshes = target->allocMeshes(draws.count());
            for (int j = 0; j < draws.count(); ++j) {
                const Draw& draw = draws[j];
                meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
                                     draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
                                     firstVertex);
                firstIndex += draw.fIndexCnt;
                firstVertex += draw.fVertexCnt;
            }

            fDraws.push_back({ meshes, draws.count() });
        }
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        if (!fProgramInfo || fDraws.isEmpty()) {
            return;
        }

        flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
        flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
        for (int i = 0; i < fDraws.count(); ++i) {
            for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
                flushState->drawMesh(fDraws[i].fMeshes[j]);
            }
        }
    }

    CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
            return CombineResult::kCannotCombine;
        }
        if (fHelper.usesLocalCoords() &&
            !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
            return CombineResult::kCannotCombine;
        }

        fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
        fWideColor |= that->fWideColor;
        return CombineResult::kMerged;
    }

#if GR_TEST_UTILS
    SkString onDumpInfo() const override {
        return SkStringPrintf("Count: %d\n%s", fPaths.count(), fHelper.dumpInfo().c_str());
    }
#endif

    struct PathData {
        SkMatrix    fViewMatrix;
        SkPath      fPath;
        SkPMColor4f fColor;
    };

    Helper fHelper;
    SkSTArray<1, PathData, true> fPaths;
    bool fWideColor;

    struct MeshDraw {
        GrSimpleMesh* fMeshes;
        int fMeshCount;
    };

    SkTDArray<MeshDraw> fDraws;
    GrProgramInfo*      fProgramInfo = nullptr;

    using INHERITED = GrMeshDrawOp;
};

} // anonymous namespace

///////////////////////////////////////////////////////////////////////////////

PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // This check requires convexity and known direction, since the direction is used to build
    // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
    if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
        (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
        !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
        args.fShape->knownDirection()) {
        return CanDrawPath::kYes;
    }
    return CanDrawPath::kNo;
}

bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "AAConvexPathRenderer::onDrawPath");
    SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                          *args.fViewMatrix,
                                          path, args.fUserStencilSettings);
    args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
    return true;
}

} // namespace skgpu::v1

#if GR_TEST_UTILS

GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return skgpu::v1::AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path,
                                           stencilSettings);
}

#endif