2 * Copyright 2012 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "src/core/SkGeometry.h"
13 #include "src/core/SkMatrixPriv.h"
14 #include "src/core/SkPathPriv.h"
15 #include "src/core/SkPointPriv.h"
16 #include "src/gpu/BufferWriter.h"
17 #include "src/gpu/KeyBuilder.h"
18 #include "src/gpu/ganesh/GrAuditTrail.h"
19 #include "src/gpu/ganesh/GrCaps.h"
20 #include "src/gpu/ganesh/GrDrawOpTest.h"
21 #include "src/gpu/ganesh/GrGeometryProcessor.h"
22 #include "src/gpu/ganesh/GrProcessor.h"
23 #include "src/gpu/ganesh/GrProgramInfo.h"
24 #include "src/gpu/ganesh/geometry/GrPathUtils.h"
25 #include "src/gpu/ganesh/geometry/GrStyledShape.h"
26 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
27 #include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
28 #include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
29 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
30 #include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
31 #include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
32 #include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
33 #include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"
41 // These enum values are assumed in member functions below.
46 // line uses one pt, quad uses 2 pts
48 // normal to edge ending at each pt
50 // is the corner where the previous segment meets this segment
51 // sharp. If so, fMid is a normalized bisector facing outward.
// Relies on the enum ordering kLine == 0, kQuad == 1 so the segment type can
// double as an index offset when selecting the last point/normal below.
55 static_assert(0 == kLine && 1 == kQuad);
// Returns the final on-curve point of this segment (a line stores 1 point, a
// quad stores 2 — see the comments above the struct).
58 const SkPoint& endPt() const {
59 static_assert(0 == kLine && 1 == kQuad);
// Returns the outward normal associated with this segment's end point.
62 const SkPoint& endNorm() const {
63 static_assert(0 == kLine && 1 == kQuad);
// Memmove-able (trivially relocatable) array of Segments describing the
// convex outline being rendered.
68 typedef SkTArray<Segment, true> SegmentArray;
// Computes the area-weighted centroid of the polygon formed by the segments'
// end points and stores it in *c (used as the interior "fan" point).
// Returns false if the result is NaN/non-finite.
70 bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
72 SkPoint center = {0, 0};
73 int count = segments.count();
79 // We translate the polygon so that the first point is at the origin.
80 // This avoids some precision issues with small area polygons far away
82 p0 = segments[0].endPt();
85 // the first and last iteration of the below loop would compute
86 // zeros since the starting / ending point is (0,0). So instead we start
87 // at i=1 and make the last iteration i=count-2.
88 pj = segments[1].endPt() - p0;
89 for (int i = 1; i < count - 1; ++i) {
91 pj = segments[i + 1].endPt() - p0;
// Cross product is twice the signed area of triangle (origin, pi, pj);
// standard shoelace-style centroid accumulation.
93 SkScalar t = SkPoint::CrossProduct(pi, pj);
95 center.fX += (pi.fX + pj.fX) * t;
96 center.fY += (pi.fY + pj.fY) * t;
100 // If the poly has no area then we instead return the average of
102 if (SkScalarNearlyZero(area)) {
105 for (int i = 0; i < count; ++i) {
106 const SkPoint& pt = segments[i].endPt();
110 SkScalar denom = SK_Scalar1 / count;
115 area = SkScalarInvert(area);
117 // undo the translate of p0 to the origin.
120 return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
// Computes the fan point, per-point outward edge normals, and corner
// bisectors (fMid) for all segments, accumulating the vertex/index counts
// needed to draw them. Returns false if the fan point is non-finite or the
// totals overflow 32 bits.
123 bool compute_vectors(SegmentArray* segments,
125 SkPathFirstDirection dir,
128 if (!center_of_mass(*segments, fanPt)) {
131 int count = segments->count();
133 // Make the normals point towards the outside
134 SkPointPriv::Side normSide;
135 if (dir == SkPathFirstDirection::kCCW) {
136 normSide = SkPointPriv::kRight_Side;
138 normSide = SkPointPriv::kLeft_Side;
// 64-bit accumulators so the per-segment totals can be range-checked against
// SK_MaxS32 before being narrowed.
141 int64_t vCount64 = 0;
142 int64_t iCount64 = 0;
143 // compute normals at all points
144 for (int a = 0; a < count; ++a) {
145 Segment& sega = (*segments)[a];
146 int b = (a + 1) % count;
147 Segment& segb = (*segments)[b];
149 const SkPoint* prevPt = &sega.endPt();
150 int n = segb.countPoints();
151 for (int p = 0; p < n; ++p) {
// Normal = normalized edge vector rotated 90 degrees toward the outside.
152 segb.fNorms[p] = segb.fPts[p] - *prevPt;
153 segb.fNorms[p].normalize();
154 segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
155 prevPt = &segb.fPts[p];
157 if (Segment::kLine == segb.fType) {
166 // compute mid-vectors where segments meet. TODO: Detect shallow corners
167 // and leave out the wedges and close gaps by stitching segments together.
168 for (int a = 0; a < count; ++a) {
169 const Segment& sega = (*segments)[a];
170 int b = (a + 1) % count;
171 Segment& segb = (*segments)[b];
// Outward bisector at the corner where sega's end meets segb's start.
172 segb.fMid = segb.fNorms[0] + sega.endNorm();
173 segb.fMid.normalize();
178 if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
// State machine that classifies the path as degenerate (all points on, or
// within kClose of, a single point or line) as points are fed in via
// update_degenerate_test() below.
186 struct DegenerateTestData {
187 DegenerateTestData() { fStage = kInitial; }
188 bool isDegenerate() const { return kNonDegenerate != fStage; }
// Unit normal of the candidate line; valid once the kLine stage is reached.
196 SkVector fLineNormal;
// Distance tolerance (in path units) for the degeneracy test, and its square
// for comparisons against squared distances.
200 static const SkScalar kClose = (SK_Scalar1 / 16);
201 static const SkScalar kCloseSqd = kClose * kClose;
// Advances the degeneracy state machine with one path point: the first point
// establishes kPoint; a point far enough from it establishes a candidate line
// (kLine); a point off that line proves the path non-degenerate.
203 void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
204 switch (data->fStage) {
205 case DegenerateTestData::kInitial:
206 data->fFirstPoint = pt;
207 data->fStage = DegenerateTestData::kPoint;
209 case DegenerateTestData::kPoint:
210 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
// Implicit line equation: fLineNormal . x + fLineC == 0 for points on it.
211 data->fLineNormal = pt - data->fFirstPoint;
212 data->fLineNormal.normalize();
213 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
214 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
215 data->fStage = DegenerateTestData::kLine;
218 case DegenerateTestData::kLine:
// Signed distance from pt to the candidate line exceeds the tolerance.
219 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
220 data->fStage = DegenerateTestData::kNonDegenerate;
223 case DegenerateTestData::kNonDegenerate:
226 SK_ABORT("Unexpected degenerate test stage.");
// Fetches the path's first direction and flips it if the (non-perspective)
// matrix reverses orientation (negative 2x2 determinant). Returns false when
// the direction cannot be computed.
230 inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
231 // At this point, we've already returned true from canDraw(), which checked that the path's
232 // direction could be determined, so this should just be fetching the cached direction.
233 // However, if perspective is involved, we're operating on a transformed path, which may no
234 // longer have a computable direction.
235 *dir = SkPathPriv::ComputeFirstDirection(path);
236 if (*dir == SkPathFirstDirection::kUnknown) {
240 // check whether m reverses the orientation
241 SkASSERT(!m.hasPerspective());
242 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
243 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
245 *dir = SkPathPriv::OppositeFirstDirection(*dir);
// Appends a line segment ending at pt to the segment array.
251 inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
252 segments->push_back();
253 segments->back().fType = Segment::kLine;
254 segments->back().fPts[0] = pt;
// Appends a quad segment. A nearly-flat quad (control point within kClose of
// the chord) degrades to a line, or to nothing when its endpoints coincide.
257 inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
258 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
259 if (pts[0] != pts[2]) {
260 add_line_to_segment(pts[2], segments);
263 segments->push_back();
264 segments->back().fType = Segment::kQuad;
// Only the control point and end point are stored; the start point is the
// previous segment's end point.
265 segments->back().fPts[0] = pts[1];
266 segments->back().fPts[1] = pts[2];
// Converts a cubic to a run of quads (constrained to the tangents so the
// approximation stays on the convex side) and appends each quad.
270 inline void add_cubic_segments(const SkPoint pts[4],
271 SkPathFirstDirection dir,
272 SegmentArray* segments) {
273 SkSTArray<15, SkPoint, true> quads;
274 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
275 int count = quads.count();
// Adjacent quads share endpoints, so each occupies 3 points in the array.
276 for (int q = 0; q < count; q += 3) {
277 add_quad_segment(&quads[q], segments);
// Walks the path's verbs, mapping points by m, building the segment list and
// running the degeneracy test on every on-curve/control point. On success
// computes the fan point, normals, and vertex/index counts via
// compute_vectors(). Returns false for degenerate (point/line) paths.
//
// NOTE(review): the original text had every "&degenerateData" argument
// corrupted to "°enerateData" — an HTML "&deg" entity decoded in place.
// Restored to take the address of the local DegenerateTestData.
281 bool get_segments(const SkPath& path,
283 SegmentArray* segments,
287 SkPath::Iter iter(path, true);
288 // This renderer over-emphasizes very thin path regions. We use the distance
289 // to the path from the sample to compute coverage. Every pixel intersected
290 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
291 // notice that the sample may be close to a very thin area of the path and
292 // thus should be very light. This is particularly egregious for degenerate
293 // line paths. We detect paths that are very close to a line (zero area) and
295 DegenerateTestData degenerateData;
296 SkPathFirstDirection dir;
297 if (!get_direction(path, m, &dir)) {
303 SkPath::Verb verb = iter.next(pts);
305 case SkPath::kMove_Verb:
307 update_degenerate_test(&degenerateData, pts[0]);
309 case SkPath::kLine_Verb: {
310 if (!SkPathPriv::AllPointsEq(pts, 2)) {
311 m.mapPoints(&pts[1], 1);
312 update_degenerate_test(&degenerateData, pts[1]);
313 add_line_to_segment(pts[1], segments);
317 case SkPath::kQuad_Verb:
318 if (!SkPathPriv::AllPointsEq(pts, 3)) {
320 update_degenerate_test(&degenerateData, pts[1]);
321 update_degenerate_test(&degenerateData, pts[2]);
322 add_quad_segment(pts, segments);
325 case SkPath::kConic_Verb: {
326 if (!SkPathPriv::AllPointsEq(pts, 3)) {
// Conics have no segment representation; approximate with quads.
328 SkScalar weight = iter.conicWeight();
329 SkAutoConicToQuads converter;
330 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
331 for (int i = 0; i < converter.countQuads(); ++i) {
332 update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
333 update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
334 add_quad_segment(quadPts + 2*i, segments);
339 case SkPath::kCubic_Verb: {
340 if (!SkPathPriv::AllPointsEq(pts, 4)) {
342 update_degenerate_test(&degenerateData, pts[1]);
343 update_degenerate_test(&degenerateData, pts[2]);
344 update_degenerate_test(&degenerateData, pts[3]);
345 add_cubic_segments(pts, dir, segments);
349 case SkPath::kDone_Verb:
350 if (degenerateData.isDegenerate()) {
353 return compute_vectors(segments, fanPt, dir, vCount, iCount);
// Vertex/index counts for one mesh draw; create_vertices() starts a new Draw
// whenever a single draw's vertex count would exceed 16-bit index range.
362 Draw() : fVertexCnt(0), fIndexCnt(0) {}
367 typedef SkTArray<Draw, true> DrawArray;
// Emits the vertex and index data for the segment list: a corner wedge at
// each segment joint, an edge quad (line) or quad-curve hull (quad) per
// segment, and interior fan triangles back to fanPt. Vertex layout is
// Position, Color, UV, D0, D1 (see the QuadEdgeEffect attributes). Draws are
// split so indices stay within 16-bit range.
369 void create_vertices(const SegmentArray& segments,
370 const SkPoint& fanPt,
371 const VertexColor& color,
375 size_t vertexStride) {
376 Draw* draw = &draws->push_back();
377 // alias just to make vert/index assignments easier to read.
378 int* v = &draw->fVertexCnt;
379 int* i = &draw->fIndexCnt;
381 int count = segments.count();
382 for (int a = 0; a < count; ++a) {
383 const Segment& sega = segments[a];
384 int b = (a + 1) % count;
385 const Segment& segb = segments[b];
387 // Check whether adding the verts for this segment to the current draw would cause index
388 // values to overflow.
390 if (Segment::kLine == segb.fType) {
395 if (draw->fVertexCnt + vCount > (1 << 16)) {
// Start a new draw and re-point the count aliases at it.
397 draw = &draws->push_back();
398 v = &draw->fVertexCnt;
399 i = &draw->fIndexCnt;
// D0/D1 for vertices that aren't against a trimming edge.
402 const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
404 // FIXME: These tris are inset in the 1 unit arc around the corner
405 SkPoint p0 = sega.endPt();
406 // Position, Color, UV, D0, D1
407 verts << p0 << color << SkPoint{0, 0} << negOneDists;
408 verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
409 verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
410 verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
// Two triangles filling the corner wedge between the adjoining normals.
412 idxs[*i + 0] = *v + 0;
413 idxs[*i + 1] = *v + 2;
414 idxs[*i + 2] = *v + 1;
415 idxs[*i + 3] = *v + 0;
416 idxs[*i + 4] = *v + 3;
417 idxs[*i + 5] = *v + 2;
422 if (Segment::kLine == segb.fType) {
423 // we draw the line edge as a degenerate quad (u is 0, v is the
424 // signed distance to the edge)
425 SkPoint v1Pos = sega.endPt();
426 SkPoint v2Pos = segb.fPts[0];
427 SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);
429 verts << fanPt << color << SkPoint{0, dist} << negOneDists;
430 verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
431 verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
432 verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
433 verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
// Outer AA quad spanning the edge, offset by the edge normal.
435 idxs[*i + 0] = *v + 3;
436 idxs[*i + 1] = *v + 1;
437 idxs[*i + 2] = *v + 2;
439 idxs[*i + 3] = *v + 4;
440 idxs[*i + 4] = *v + 3;
441 idxs[*i + 5] = *v + 2;
445 // Draw the interior fan if it exists.
446 // TODO: Detect and combine colinear segments. This will ensure we catch every case
447 // with no interior, and that the resulting shared edge uses the same endpoints.
449 idxs[*i + 0] = *v + 0;
450 idxs[*i + 1] = *v + 2;
451 idxs[*i + 2] = *v + 1;
// Quad segment: full control polygon for the curve hull.
458 SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
// c0/c1 are the line constants for the two trimming edges (D0/D1).
460 SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
461 SkScalar c1 = segb.fNorms[1].dot(qpts[2]);
463 // We must transform the positions into UV in cpu memory and then copy them to the gpu
464 // buffer. If we write the position first into the gpu buffer then calculate the UVs, it
465 // will cause us to read from the GPU buffer which can be very slow.
470 PosAndUV posAndUVPoints[6];
471 posAndUVPoints[0].fPos = fanPt;
472 posAndUVPoints[1].fPos = qpts[0];
473 posAndUVPoints[2].fPos = qpts[2];
474 posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
475 posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
476 SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
478 posAndUVPoints[5].fPos = qpts[1] + midVec;
// Map each position into the quad's canonical (u, v) space on the CPU.
480 GrPathUtils::QuadUVMatrix toUV(qpts);
481 toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));
483 verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
484 << (-segb.fNorms[0].dot(fanPt) + c0)
485 << (-segb.fNorms[1].dot(fanPt) + c1);
487 verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
489 << (-segb.fNorms[1].dot(qpts[0]) + c1);
491 verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
492 << (-segb.fNorms[0].dot(qpts[2]) + c0)
494 // We need a negative value that is very large that it won't effect results if it is
495 // interpolated with. However, the value can't be too large of a negative that it
496 // effects numerical precision on less powerful GPUs.
497 static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
498 verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
499 << kStableLargeNegativeValue
500 << kStableLargeNegativeValue;
502 verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
503 << kStableLargeNegativeValue
504 << kStableLargeNegativeValue;
506 verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
507 << kStableLargeNegativeValue
508 << kStableLargeNegativeValue;
// Three triangles covering the curve hull between the offset edges.
510 idxs[*i + 0] = *v + 3;
511 idxs[*i + 1] = *v + 1;
512 idxs[*i + 2] = *v + 2;
513 idxs[*i + 3] = *v + 4;
514 idxs[*i + 4] = *v + 3;
515 idxs[*i + 5] = *v + 2;
517 idxs[*i + 6] = *v + 5;
518 idxs[*i + 7] = *v + 3;
519 idxs[*i + 8] = *v + 4;
523 // Draw the interior fan if it exists.
524 // TODO: Detect and combine colinear segments. This will ensure we catch every case
525 // with no interior, and that the resulting shared edge uses the same endpoints.
527 idxs[*i + 0] = *v + 0;
528 idxs[*i + 1] = *v + 2;
529 idxs[*i + 2] = *v + 1;
539 ///////////////////////////////////////////////////////////////////////////////
542 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
543 * two components of the vertex attribute. Coverage is based on signed
544 * distance with negative being inside, positive outside. The edge is specified in
545 * window space (y-down). If either the third or fourth component of the interpolated
546 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
547 * attempt to trim to a portion of the infinite quad.
548 * Requires shader derivative instruction support.
// Geometry processor for the AA convex path op. Emits per-vertex position,
// color, and the float4 quad-edge coords (u, v, D0, D1) described in the
// comment above; arena-allocated via Make().
551 class QuadEdgeEffect : public GrGeometryProcessor {
553 static GrGeometryProcessor* Make(SkArenaAlloc* arena,
554 const SkMatrix& localMatrix,
555 bool usesLocalCoords,
557 return arena->make([&](void* ptr) {
558 return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
562 ~QuadEdgeEffect() override {}
564 const char* name() const override { return "QuadEdge"; }
// Program key: the local-coords flag plus the local matrix classification.
566 void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
567 b->addBool(fUsesLocalCoords, "usesLocalCoords");
568 b->addBits(ProgramImpl::kMatrixKeyBits,
569 ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
573 std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;
576 QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
577 : INHERITED(kQuadEdgeEffect_ClassID)
578 , fLocalMatrix(localMatrix)
579 , fUsesLocalCoords(usesLocalCoords) {
580 fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
581 fInColor = MakeColorAttribute("inColor", wideColor);
582 // GL on iOS 14 needs more precision for the quadedge attributes
583 fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
// 3 attributes laid out back-to-back starting at fInPosition.
584 this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
587 Attribute fInPosition;
589 Attribute fInQuadEdge;
591 SkMatrix fLocalMatrix;
592 bool fUsesLocalCoords;
594 GR_DECLARE_GEOMETRY_PROCESSOR_TEST
596 using INHERITED = GrGeometryProcessor;
// Builds the SkSL program for QuadEdgeEffect: passes the quad-edge coords
// through a varying, computes coverage in the fragment shader from the
// implicit curve u^2 - v and its screen-space gradient, and trims against the
// D0/D1 edge distances.
599 std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
600 const GrShaderCaps&) const {
601 class Impl : public ProgramImpl {
603 void setData(const GrGLSLProgramDataManager& pdman,
604 const GrShaderCaps& shaderCaps,
605 const GrGeometryProcessor& geomProc) override {
606 const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
// Uploads the local matrix uniform only when it changed (cached copy).
607 SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
611 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
612 const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
613 GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
614 GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
615 GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
616 GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
619 varyingHandler->emitAttributes(qe);
621 // GL on iOS 14 needs more precision for the quadedge attributes
622 // We might as well enable it everywhere
623 GrGLSLVarying v(SkSLType::kFloat4);
624 varyingHandler->addVarying("QuadEdge", &v);
625 vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());
627 // Setup pass through color
628 fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
629 varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);
632 WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
633 if (qe.fUsesLocalCoords) {
634 WriteLocalCoord(vertBuilder,
638 qe.fInPosition.asShaderVar(),
640 &fLocalMatrixUniform);
643 fragBuilder->codeAppendf("half edgeAlpha;");
645 // keep the derivative instructions outside the conditional
646 fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
647 fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
648 fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
649 // today we know z and w are in device space. We could use derivatives
650 fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
652 fragBuilder->codeAppendf ("} else {");
// Gradient of f(u,v) = u^2 - v via the chain rule, in screen space.
653 fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
654 " half(2.0*%s.x*duvdy.x - duvdy.y));",
656 fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
// Signed distance approximation f / |grad f|, mapped to [0, 1] coverage.
658 fragBuilder->codeAppendf("edgeAlpha = "
659 "saturate(0.5 - edgeAlpha / length(gF));}");
661 fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
// Cached local matrix; InvalidMatrix forces the first setData upload.
665 SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();
667 UniformHandle fLocalMatrixUniform;
670 return std::make_unique<Impl>();
673 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
// Fuzz-test factory: random matrix/flags; null when derivatives are
// unsupported since the effect requires them.
676 GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
677 SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
678 bool usesLocalCoords = d->fRandom->nextBool();
679 bool wideColor = d->fRandom->nextBool();
680 // Doesn't work without derivative instructions.
681 return d->caps()->shaderCaps()->shaderDerivativeSupport()
682 ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
// Draw op that renders convex, simple-filled paths with analytic AA using
// QuadEdgeEffect. Batches multiple compatible paths (see onCombineIfPossible)
// and tessellates them into per-path meshes in onPrepareDraws.
687 class AAConvexPathOp final : public GrMeshDrawOp {
689 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
694 static GrOp::Owner Make(GrRecordingContext* context,
696 const SkMatrix& viewMatrix,
698 const GrUserStencilSettings* stencilSettings) {
699 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
703 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
704 const SkMatrix& viewMatrix, const SkPath& path,
705 const GrUserStencilSettings* stencilSettings)
706 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
707 fPaths.emplace_back(PathData{viewMatrix, path, color});
708 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
712 const char* name() const override { return "AAConvexPathOp"; }
714 void visitProxies(const GrVisitProxyFunc& func) const override {
// Once the program info exists it owns the processors; visit through it.
716 fProgramInfo->visitFPProxies(func);
718 fHelper.visitProxies(func);
722 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
724 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
725 GrClampType clampType) override {
726 return fHelper.finalizeProcessors(
727 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
728 &fPaths.back().fColor, &fWideColor);
732 GrProgramInfo* programInfo() override { return fProgramInfo; }
734 void onCreateProgramInfo(const GrCaps* caps,
736 const GrSurfaceProxyView& writeView,
737 bool usesMSAASurface,
738 GrAppliedClip&& appliedClip,
739 const GrDstProxyView& dstProxyView,
740 GrXferBarrierFlags renderPassXferBarriers,
741 GrLoadOp colorLoadOp) override {
// Local coords are produced by inverting the view matrix in the shader.
743 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
747 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
748 fHelper.usesLocalCoords(),
751 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
752 std::move(appliedClip),
753 dstProxyView, quadProcessor,
754 GrPrimitiveType::kTriangles,
755 renderPassXferBarriers, colorLoadOp);
// Tessellates each batched path into segments, then into vertex/index data,
// recording one mesh per Draw produced by create_vertices().
758 void onPrepareDraws(GrMeshDrawTarget* target) override {
759 int instanceCount = fPaths.count();
762 this->createProgramInfo(target);
768 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
770 fDraws.reserve(instanceCount);
772 // TODO generate all segments for all paths and use one vertex buffer
773 for (int i = 0; i < instanceCount; i++) {
774 const PathData& args = fPaths[i];
776 // We use the fact that SkPath::transform path does subdivision based on
777 // perspective. Otherwise, we apply the view matrix when copying to the
778 // segment representation.
779 const SkMatrix* viewMatrix = &args.fViewMatrix;
781 // We avoid initializing the path unless we have to
782 const SkPath* pathPtr = &args.fPath;
783 SkTLazy<SkPath> tmpPath;
784 if (viewMatrix->hasPerspective()) {
785 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
786 tmpPathPtr->setIsVolatile(true);
787 tmpPathPtr->transform(*viewMatrix);
788 viewMatrix = &SkMatrix::I();
789 pathPtr = tmpPathPtr;
795 kPreallocSegmentCnt = 512 / sizeof(Segment),
796 kPreallocDrawCnt = 4,
798 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
801 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
806 sk_sp<const GrBuffer> vertexBuffer;
809 VertexWriter verts = target->makeVertexWriter(kVertexStride,
815 SkDebugf("Could not allocate vertices\n");
819 sk_sp<const GrBuffer> indexBuffer;
822 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
824 SkDebugf("Could not allocate indices\n");
828 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
829 VertexColor color(args.fColor, fWideColor);
830 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
832 GrSimpleMesh* meshes = target->allocMeshes(draws.count());
833 for (int j = 0; j < draws.count(); ++j) {
834 const Draw& draw = draws[j];
835 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
836 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
// Advance buffer offsets past this draw's portion.
838 firstIndex += draw.fIndexCnt;
839 firstVertex += draw.fVertexCnt;
842 fDraws.push_back({ meshes, draws.count() });
846 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
847 if (!fProgramInfo || fDraws.isEmpty()) {
851 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
852 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
853 for (int i = 0; i < fDraws.count(); ++i) {
854 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
855 flushState->drawMesh(fDraws[i].fMeshes[j]);
// Ops merge only when helpers are compatible and (if local coords are used)
// the view matrices match, since one inverse matrix serves the whole batch.
860 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
861 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
862 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
863 return CombineResult::kCannotCombine;
865 if (fHelper.usesLocalCoords() &&
866 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
867 return CombineResult::kCannotCombine;
870 fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
871 fWideColor |= that->fWideColor;
872 return CombineResult::kMerged;
876 SkString onDumpInfo() const override {
877 return SkStringPrintf("Count: %d\n%s", fPaths.count(), fHelper.dumpInfo().c_str());
882 SkMatrix fViewMatrix;
888 SkSTArray<1, PathData, true> fPaths;
892 GrSimpleMesh* fMeshes;
896 SkTDArray<MeshDraw> fDraws;
897 GrProgramInfo* fProgramInfo = nullptr;
899 using INHERITED = GrMeshDrawOp;
902 } // anonymous namespace
904 ///////////////////////////////////////////////////////////////////////////////
// Accepts only coverage-AA, simple-fill, non-inverse, convex paths with a
// known direction, on hardware with shader derivative support.
906 PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
907 // This check requires convexity and known direction, since the direction is used to build
908 // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
909 if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
910 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
911 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
912 args.fShape->knownDirection()) {
913 return CanDrawPath::kYes;
915 return CanDrawPath::kNo;
// Creates an AAConvexPathOp for the shape's path and records it on the
// surface draw context. Expects a non-MSAA target and a non-empty shape.
918 bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
919 GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
920 "AAConvexPathRenderer::onDrawPath");
921 SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
922 SkASSERT(!args.fShape->isEmpty());
925 args.fShape->asPath(&path);
927 GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
929 path, args.fUserStencilSettings);
930 args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
934 } // namespace skgpu::v1
// Fuzz-test hook: builds an AAConvexPathOp with a random invertible matrix,
// random convex path, and random stencil settings.
938 GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
939 SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
940 const SkPath& path = GrTest::TestPathConvex(random);
941 const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
942 return skgpu::v1::AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path,