3 * Copyright 2012 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
9 #include "GrAAConvexPathRenderer.h"
11 #include "GrAAConvexTessellator.h"
12 #include "GrBatchFlushState.h"
13 #include "GrBatchTest.h"
15 #include "GrContext.h"
16 #include "GrDefaultGeoProcFactory.h"
17 #include "GrGeometryProcessor.h"
18 #include "GrInvariantOutput.h"
19 #include "GrPathUtils.h"
20 #include "GrProcessor.h"
21 #include "GrPipelineBuilder.h"
22 #include "GrStrokeInfo.h"
23 #include "SkGeometry.h"
24 #include "SkPathPriv.h"
26 #include "SkTraceEvent.h"
27 #include "batches/GrVertexBatch.h"
28 #include "glsl/GrGLSLGeometryProcessor.h"
29 #include "glsl/GrGLSLProgramBuilder.h"
30 #include "glsl/GrGLSLProgramDataManager.h"
31 #include "glsl/GrGLSLVarying.h"
// Default constructor — no state to initialize in the visible lines.
// NOTE(review): this chunk is missing interior lines (the embedded numbering
// jumps); bodies and closing braces of the members below are not visible.
33 GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
// Segment: one edge of the convex path, either a line or a quad.
38 // These enum values are assumed in member functions below.
43     // line uses one pt, quad uses 2 pts
45     // normal to edge ending at each pt
47     // is the corner where the previous segment meets this segment
48     // sharp. If so, fMid is a normalized bisector facing outward.
// The static asserts guarantee kLine==0 / kQuad==1 so fType can be used
// directly as an index into fPts/fNorms (countPoints() == fType + 1).
52         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
// Last point of the segment (fPts[fType], presumably — TODO confirm).
55     const SkPoint& endPt() const {
56         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
// Normal at the segment's end point.
59     const SkPoint& endNorm() const {
60         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
// Growable array of segments; 'true' requests memcpy-style moves.
65 typedef SkTArray<Segment, true> SegmentArray;
// Computes the centroid of the polygon formed by the segment end points,
// used as the fan point for the interior triangulation.
// NOTE(review): interior lines are missing from this chunk (area accumulation,
// scaling by 1/(3*area), the zero-area fallback body, and the closing braces).
67 static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
69     SkPoint center = {0, 0};
70     int count = segments.count();
73         // We translate the polygon so that the first point is at the origin.
74         // This avoids some precision issues with small area polygons far away
76         p0 = segments[0].endPt();
79         // the first and last iteration of the below loop would compute
80         // zeros since the starting / ending point is (0,0). So instead we start
81         // at i=1 and make the last iteration i=count-2.
82         pj = segments[1].endPt() - p0;
// Standard shoelace-style centroid accumulation: each cross product term
// weights the midpoint contribution of edge (pi, pj).
83         for (int i = 1; i < count - 1; ++i) {
85             pj = segments[i + 1].endPt() - p0;
87             SkScalar t = SkPoint::CrossProduct(pi, pj);
89             center.fX += (pi.fX + pj.fX) * t;
90             center.fY += (pi.fY + pj.fY) * t;
94         // If the poly has no area then we instead return the average of
// Degenerate (zero-area) polygon: fall back to the mean of the end points.
96         if (SkScalarNearlyZero(area)) {
99             for (int i = 0; i < count; ++i) {
100                 const SkPoint& pt = segments[i].endPt();
104             SkScalar denom = SK_Scalar1 / count;
109             area = SkScalarInvert(area);
111         // undo the translate of p0 to the origin.
// Guard against NaN escaping into vertex generation.
114     SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
// Computes the fan point, outward edge normals, and corner bisectors (fMid)
// for all segments. Also appears to tally vertex/index counts (vCount/iCount
// parameters are cut off in this chunk — TODO confirm against full source).
117 static void compute_vectors(SegmentArray* segments,
119                             SkPathPriv::FirstDirection dir,
122     center_of_mass(*segments, fanPt);
123     int count = segments->count();
125     // Make the normals point towards the outside
// For a CCW path the outward normal is to the right of the edge direction;
// for CW it is to the left.
126     SkPoint::Side normSide;
127     if (dir == SkPathPriv::kCCW_FirstDirection) {
128         normSide = SkPoint::kRight_Side;
130         normSide = SkPoint::kLeft_Side;
135     // compute normals at all points
// Each segment b's normals are built from the direction of travel from the
// previous point (end of segment a) through b's control/end points.
136     for (int a = 0; a < count; ++a) {
137         Segment& sega = (*segments)[a];
138         int b = (a + 1) % count;
139         Segment& segb = (*segments)[b];
141         const SkPoint* prevPt = &sega.endPt();
142         int n = segb.countPoints();
143         for (int p = 0; p < n; ++p) {
144             segb.fNorms[p] = segb.fPts[p] - *prevPt;
145             segb.fNorms[p].normalize();
146             segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
147             prevPt = &segb.fPts[p];
// NOTE(review): the per-type vertex/index count bookkeeping that normally
// follows this branch is missing from this chunk.
149         if (Segment::kLine == segb.fType) {
158     // compute mid-vectors where segments meet. TODO: Detect shallow corners
159     // and leave out the wedges and close gaps by stitching segments together.
// fMid bisects the corner between sega's end normal and segb's first normal.
160     for (int a = 0; a < count; ++a) {
161         const Segment& sega = (*segments)[a];
162         int b = (a + 1) % count;
163         Segment& segb = (*segments)[b];
164         segb.fMid = segb.fNorms[0] + sega.endNorm();
165         segb.fMid.normalize();
// Incremental test for whether the whole path is (nearly) a point or a line.
// The state machine advances Initial -> Point -> Line -> NonDegenerate as
// points that break the current hypothesis are observed.
172 struct DegenerateTestData {
173     DegenerateTestData() { fStage = kInitial; }
174     bool isDegenerate() const { return kNonDegenerate != fStage; }
// Normal/offset of the candidate line (valid once fStage >= kLine).
182     SkVector fLineNormal;
// Distance tolerance: points within 1/16 of a unit are considered coincident.
186 static const SkScalar kClose = (SK_Scalar1 / 16);
187 static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
// Feeds one path point into the degenerate-path state machine.
// NOTE(review): the break statements between cases are among the lines missing
// from this chunk — the original cases do not fall through; confirm upstream.
189 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
190     switch (data->fStage) {
191         case DegenerateTestData::kInitial:
192             data->fFirstPoint = pt;
193             data->fStage = DegenerateTestData::kPoint;
195         case DegenerateTestData::kPoint:
// A second distinct point upgrades the hypothesis from "point" to "line":
// record the implicit line equation fLineNormal . p + fLineC = 0.
196             if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
197                 data->fLineNormal = pt - data->fFirstPoint;
198                 data->fLineNormal.normalize();
199                 data->fLineNormal.setOrthog(data->fLineNormal);
200                 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
201                 data->fStage = DegenerateTestData::kLine;
204         case DegenerateTestData::kLine:
// A point far from the candidate line proves the path has real area.
205             if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
206                 data->fStage = DegenerateTestData::kNonDegenerate;
208         case DegenerateTestData::kNonDegenerate:
211             SkFAIL("Unexpected degenerate test stage.");
// Determines the path's winding direction in device space. Fails (returns
// false, presumably — closing lines are missing here) when the direction
// cannot be computed for a degenerate path.
215 static inline bool get_direction(const SkPath& path, const SkMatrix& m,
216                                  SkPathPriv::FirstDirection* dir) {
217     if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
220     // check whether m reverses the orientation
// Perspective is excluded by the caller, so the 2x2 determinant's sign tells
// whether the matrix flips orientation; if so the winding is reversed.
221     SkASSERT(!m.hasPerspective());
222     SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
223                       SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
225         *dir = SkPathPriv::OppositeFirstDirection(*dir);
// Appends a line segment ending at 'pt' to the segment list.
230 static inline void add_line_to_segment(const SkPoint& pt,
231                                        SegmentArray* segments) {
232     segments->push_back();
233     segments->back().fType = Segment::kLine;
234     segments->back().fPts[0] = pt;
// Appends a quad segment for control points pts[0..2]. Near-degenerate quads
// (a control point coincident with an end point) are demoted to a line — or
// dropped entirely when the quad starts and ends at the same point.
237 static inline void add_quad_segment(const SkPoint pts[3],
238                                     SegmentArray* segments) {
239     if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
240         if (pts[0] != pts[2]) {
241             add_line_to_segment(pts[2], segments);
// Non-degenerate quad: store the control point and end point (the start point
// is implied by the previous segment's end).
244         segments->push_back();
245         segments->back().fType = Segment::kQuad;
246         segments->back().fPts[0] = pts[1];
247         segments->back().fPts[1] = pts[2];
// Converts a cubic into quads (preserving convexity for direction 'dir') and
// appends each resulting quad as a segment.
251 static inline void add_cubic_segments(const SkPoint pts[4],
252                                       SkPathPriv::FirstDirection dir,
253                                       SegmentArray* segments) {
// 15 preallocated points = room for 5 quads without heap allocation.
254     SkSTArray<15, SkPoint, true> quads;
255     GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads);
256     int count = quads.count();
// convertCubicToQuads emits quads as overlapping triples: [0..2], [3..5], ...
257     for (int q = 0; q < count; q += 3) {
258         add_quad_segment(&quads[q], segments);
// Walks the path with SkPath::Iter, mapping points through 'm', feeding the
// degenerate-path detector, and building the segment list. On kDone it rejects
// degenerate paths and finishes by computing normals/counts via
// compute_vectors. NOTE(review): several parameters (fanPt, vCount, iCount),
// the verb-loop braces, and the mapPoints calls for quads/cubics are among the
// lines missing from this chunk.
262 static bool get_segments(const SkPath& path,
264                          SegmentArray* segments,
268     SkPath::Iter iter(path, true);
269     // This renderer over-emphasizes very thin path regions. We use the distance
270     // to the path from the sample to compute coverage. Every pixel intersected
271     // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
272     // notice that the sample may be close to a very thin area of the path and
273     // thus should be very light. This is particularly egregious for degenerate
274     // line paths. We detect paths that are very close to a line (zero area) and
276     DegenerateTestData degenerateData;
277     SkPathPriv::FirstDirection dir;
278     // get_direction can fail for some degenerate paths.
279     if (!get_direction(path, m, &dir)) {
285         SkPath::Verb verb = iter.next(pts);
287             case SkPath::kMove_Verb:
289                 update_degenerate_test(&degenerateData, pts[0]);
291             case SkPath::kLine_Verb: {
292                 m.mapPoints(&pts[1], 1);
293                 update_degenerate_test(&degenerateData, pts[1]);
294                 add_line_to_segment(pts[1], segments);
297             case SkPath::kQuad_Verb:
299                 update_degenerate_test(&degenerateData, pts[1]);
300                 update_degenerate_test(&degenerateData, pts[2]);
301                 add_quad_segment(pts, segments);
303             case SkPath::kConic_Verb: {
// Conics have no direct segment form: approximate with quads at 0.5 tolerance.
305                 SkScalar weight = iter.conicWeight();
306                 SkAutoConicToQuads converter;
307                 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
308                 for (int i = 0; i < converter.countQuads(); ++i) {
309                     update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
310                     update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
311                     add_quad_segment(quadPts + 2*i, segments);
315             case SkPath::kCubic_Verb: {
317                 update_degenerate_test(&degenerateData, pts[1]);
318                 update_degenerate_test(&degenerateData, pts[2]);
319                 update_degenerate_test(&degenerateData, pts[3]);
320                 add_cubic_segments(pts, dir, segments);
323             case SkPath::kDone_Verb:
// Degenerate (point/line) paths are rejected rather than rendered too boldly.
324                 if (degenerateData.isDegenerate()) {
327                     compute_vectors(segments, fanPt, dir, vCount, iCount);
// Draw: vertex/index counts for one sub-draw (draws are split when 16-bit
// index space would overflow).
344     Draw() : fVertexCnt(0), fIndexCnt(0) {}
349 typedef SkTArray<Draw, true> DrawArray;
// Emits the anti-aliased geometry for all segments: a corner wedge per joint,
// a quad (degenerate for lines) per edge, and interior fan triangles.
// NOTE(review): chunk is missing lines, including the QuadVertex declaration,
// several count computations, and loop/brace closers.
351 static void create_vertices(const SegmentArray&  segments,
352                             const SkPoint& fanPt,
356     Draw* draw = &draws->push_back();
357     // alias just to make vert/index assignments easier to read.
358     int* v = &draw->fVertexCnt;
359     int* i = &draw->fIndexCnt;
361     int count = segments.count();
362     for (int a = 0; a < count; ++a) {
363         const Segment& sega = segments[a];
364         int b = (a + 1) % count;
365         const Segment& segb = segments[b];
367         // Check whether adding the verts for this segment to the current draw would cause index
368         // values to overflow.
370         if (Segment::kLine == segb.fType) {
// Indices are uint16_t, so a draw may hold at most 2^16 vertices; start a new
// Draw record when this segment would push past that.
375         if (draw->fVertexCnt + vCount > (1 << 16)) {
378             draw = &draws->push_back();
379             v = &draw->fVertexCnt;
380             i = &draw->fIndexCnt;
383         // FIXME: These tris are inset in the 1 unit arc around the corner
// Corner wedge: fan from the joint point out along sega's end normal, the
// corner bisector (fMid), and segb's first normal. UV v=-1 marks the outer
// (zero-coverage) edge; fD0/fD1 = -1 disables the quad edge-distance clip.
384         verts[*v + 0].fPos = sega.endPt();
385         verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
386         verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
387         verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
388         verts[*v + 0].fUV.set(0,0);
389         verts[*v + 1].fUV.set(0,-SK_Scalar1);
390         verts[*v + 2].fUV.set(0,-SK_Scalar1);
391         verts[*v + 3].fUV.set(0,-SK_Scalar1);
392         verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
393         verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
394         verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
395         verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
397         idxs[*i + 0] = *v + 0;
398         idxs[*i + 1] = *v + 2;
399         idxs[*i + 2] = *v + 1;
400         idxs[*i + 3] = *v + 0;
401         idxs[*i + 4] = *v + 3;
402         idxs[*i + 5] = *v + 2;
407         if (Segment::kLine == segb.fType) {
// Line edge: inner triangle (fan point + edge) plus an outer anti-aliased
// strip offset by the edge normal.
408             verts[*v + 0].fPos = fanPt;
409             verts[*v + 1].fPos = sega.endPt();
410             verts[*v + 2].fPos = segb.fPts[0];
412             verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
413             verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];
415             // we draw the line edge as a degenerate quad (u is 0, v is the
416             // signed distance to the edge)
417             SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
419             verts[*v + 0].fUV.set(0, dist);
420             verts[*v + 1].fUV.set(0, 0);
421             verts[*v + 2].fUV.set(0, 0);
422             verts[*v + 3].fUV.set(0, -SK_Scalar1);
423             verts[*v + 4].fUV.set(0, -SK_Scalar1);
425             verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
426             verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
427             verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
428             verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
429             verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;
431             idxs[*i + 0] = *v + 3;
432             idxs[*i + 1] = *v + 1;
433             idxs[*i + 2] = *v + 2;
435             idxs[*i + 3] = *v + 4;
436             idxs[*i + 4] = *v + 3;
437             idxs[*i + 5] = *v + 2;
441             // Draw the interior fan if it exists.
442             // TODO: Detect and combine colinear segments. This will ensure we catch every case
443             // with no interior, and that the resulting shared edge uses the same endpoints.
445                 idxs[*i + 0] = *v + 0;
446                 idxs[*i + 1] = *v + 2;
447                 idxs[*i + 2] = *v + 1;
// Quad edge: six vertices — fan point, quad end points, and three outer
// points offset by the normals/bisector. fD0/fD1 hold signed distances to the
// edge's bounding half-planes; outer verts use a huge negative sentinel so
// the shader's half-plane trim never clips them.
454             SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
456             SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
459             verts[*v + 0].fPos = fanPt;
460             verts[*v + 1].fPos = qpts[0];
461             verts[*v + 2].fPos = qpts[2];
462             verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
463             verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
464             verts[*v + 5].fPos = qpts[1] + midVec;
466             SkScalar c = segb.fNorms[0].dot(qpts[0]);
467             verts[*v + 0].fD0 =  -segb.fNorms[0].dot(fanPt) + c;
468             verts[*v + 1].fD0 =  0.f;
469             verts[*v + 2].fD0 =  -segb.fNorms[0].dot(qpts[2]) + c;
470             verts[*v + 3].fD0 = -SK_ScalarMax/100;
471             verts[*v + 4].fD0 = -SK_ScalarMax/100;
472             verts[*v + 5].fD0 = -SK_ScalarMax/100;
474             c = segb.fNorms[1].dot(qpts[2]);
475             verts[*v + 0].fD1 =  -segb.fNorms[1].dot(fanPt) + c;
476             verts[*v + 1].fD1 =  -segb.fNorms[1].dot(qpts[0]) + c;
477             verts[*v + 2].fD1 =  0.f;
478             verts[*v + 3].fD1 = -SK_ScalarMax/100;
479             verts[*v + 4].fD1 = -SK_ScalarMax/100;
480             verts[*v + 5].fD1 = -SK_ScalarMax/100;
// Map all six positions into the quad's canonical (u, v) space where the
// curve is u^2 - v = 0.
482             GrPathUtils::QuadUVMatrix toUV(qpts);
483             toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);
485             idxs[*i + 0] = *v + 3;
486             idxs[*i + 1] = *v + 1;
487             idxs[*i + 2] = *v + 2;
488             idxs[*i + 3] = *v + 4;
489             idxs[*i + 4] = *v + 3;
490             idxs[*i + 5] = *v + 2;
492             idxs[*i + 6] = *v + 5;
493             idxs[*i + 7] = *v + 3;
494             idxs[*i + 8] = *v + 4;
498             // Draw the interior fan if it exists.
499             // TODO: Detect and combine colinear segments. This will ensure we catch every case
500             // with no interior, and that the resulting shared edge uses the same endpoints.
502                 idxs[*i + 0] = *v + 0;
503                 idxs[*i + 1] = *v + 2;
504                 idxs[*i + 2] = *v + 1;
514 ///////////////////////////////////////////////////////////////////////////////
517 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
518 * two components of the vertex attribute. Coverage is based on signed
519 * distance with negative being inside, positive outside. The edge is specified in
520 * window space (y-down). If either the third or fourth component of the interpolated
521 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
522 * attempt to trim to a portion of the infinite quad.
523 * Requires shader derivative instruction support.
526 class QuadEdgeEffect : public GrGeometryProcessor {
529 static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix,
530 bool usesLocalCoords) {
531 return new QuadEdgeEffect(color, localMatrix, usesLocalCoords);
534 virtual ~QuadEdgeEffect() {}
536 const char* name() const override { return "QuadEdge"; }
538 const Attribute* inPosition() const { return fInPosition; }
539 const Attribute* inQuadEdge() const { return fInQuadEdge; }
540 GrColor color() const { return fColor; }
541 bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
542 const SkMatrix& localMatrix() const { return fLocalMatrix; }
543 bool usesLocalCoords() const { return fUsesLocalCoords; }
545 class GLSLProcessor : public GrGLSLGeometryProcessor {
548 : fColor(GrColor_ILLEGAL) {}
550 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
551 const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
552 GrGLSLGPBuilder* pb = args.fPB;
553 GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
554 GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
557 varyingHandler->emitAttributes(qe);
559 GrGLSLVertToFrag v(kVec4f_GrSLType);
560 varyingHandler->addVarying("QuadEdge", &v);
561 vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);
563 GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
564 // Setup pass through color
565 if (!qe.colorIgnored()) {
566 this->setupUniformColor(pb, fragBuilder, args.fOutputColor, &fColorUniform);
570 this->setupPosition(pb, vertBuilder, gpArgs, qe.inPosition()->fName);
573 this->emitTransforms(args.fPB,
576 gpArgs->fPositionVar,
577 qe.inPosition()->fName,
580 args.fTransformsOut);
582 SkAssertResult(fragBuilder->enableFeature(
583 GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
584 fragBuilder->codeAppendf("float edgeAlpha;");
586 // keep the derivative instructions outside the conditional
587 fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
588 fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
589 fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
590 // today we know z and w are in device space. We could use derivatives
591 fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
593 fragBuilder->codeAppendf ("} else {");
594 fragBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
595 " 2.0*%s.x*duvdy.x - duvdy.y);",
597 fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
599 fragBuilder->codeAppendf("edgeAlpha = "
600 "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");
602 fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
605 static inline void GenKey(const GrGeometryProcessor& gp,
607 GrProcessorKeyBuilder* b) {
608 const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
610 key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
611 key |= qee.colorIgnored() ? 0x2 : 0x0;
615 void setData(const GrGLSLProgramDataManager& pdman,
616 const GrPrimitiveProcessor& gp) override {
617 const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
618 if (qe.color() != fColor) {
620 GrColorToRGBAFloat(qe.color(), c);
621 pdman.set4fv(fColorUniform, 1, c);
626 void setTransformData(const GrPrimitiveProcessor& primProc,
627 const GrGLSLProgramDataManager& pdman,
629 const SkTArray<const GrCoordTransform*, true>& transforms) override {
630 this->setTransformDataHelper<QuadEdgeEffect>(primProc, pdman, index, transforms);
635 UniformHandle fColorUniform;
637 typedef GrGLSLGeometryProcessor INHERITED;
640 void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
641 GLSLProcessor::GenKey(*this, caps, b);
644 GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
645 return new GLSLProcessor();
649 QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
651 , fLocalMatrix(localMatrix)
652 , fUsesLocalCoords(usesLocalCoords) {
653 this->initClassID<QuadEdgeEffect>();
654 fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
655 fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
658 const Attribute* fInPosition;
659 const Attribute* fInQuadEdge;
661 SkMatrix fLocalMatrix;
662 bool fUsesLocalCoords;
664 GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
666 typedef GrGeometryProcessor INHERITED;
669 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
671 const GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
672 // Doesn't work without derivative instructions.
673 return d->fCaps->shaderCaps()->shaderDerivativeSupport() ?
674 QuadEdgeEffect::Create(GrRandomColor(d->fRandom),
675 GrTest::TestMatrix(d->fRandom),
676 d->fRandom->nextBool()) : nullptr;
679 ///////////////////////////////////////////////////////////////////////////////
681 bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
682 return (args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias &&
683 args.fStroke->isFillStyle() && !args.fPath->isInverseFillType() &&
684 args.fPath->isConvex());
687 // extract the result vertices and indices from the GrAAConvexTessellator
// Writes interleaved vertices (position, color, and — unless coverage is
// folded into alpha — a float coverage) plus indices into caller buffers.
// 'vertexStride' must match the geometry processor's vertex layout.
688 static void extract_verts(const GrAAConvexTessellator& tess,
693                           bool tweakAlphaForCoverage) {
694     intptr_t verts = reinterpret_cast<intptr_t>(vertices);
// First pass: positions at offset 0 of each vertex.
696     for (int i = 0; i < tess.numPts(); ++i) {
697         *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
700     // Make 'verts' point to the colors
701     verts += sizeof(SkPoint);
702     for (int i = 0; i < tess.numPts(); ++i) {
703         if (tweakAlphaForCoverage) {
// Coverage is premultiplied into the color's alpha (0xff short-circuits the
// multiply for fully covered vertices).
704             SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
705             unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
706             GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
707             *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
709             *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
710             *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
715     for (int i = 0; i < tess.numIndices(); ++i) {
716         idxs[i] = tess.index(i);
// Builds the default geometry processor for the lines-only (tessellated)
// path: per-vertex color, coverage either folded into alpha or as its own
// attribute, and position reused as local coords when needed.
720 static const GrGeometryProcessor* create_fill_gp(bool tweakAlphaForCoverage,
721                                                  const SkMatrix& viewMatrix,
722                                                  bool usesLocalCoords,
723                                                  bool coverageIgnored) {
724     using namespace GrDefaultGeoProcFactory;
726     Color color(Color::kAttribute_Type);
727     Coverage::Type coverageType;
728     // TODO remove coverage if coverage is ignored
729     /*if (coverageIgnored) {
730         coverageType = Coverage::kNone_Type;
731     } else*/ if (tweakAlphaForCoverage) {
732         coverageType = Coverage::kSolid_Type;
734         coverageType = Coverage::kAttribute_Type;
736     Coverage coverage(coverageType);
// Positions are already in device space, so local coords derive from position.
737     LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
738                                               LocalCoords::kUnused_Type);
739     return CreateForDeviceSpace(color, coverage, localCoords, viewMatrix);
// Batch that renders one or more convex paths with analytic anti-aliasing.
// Line-only paths use GrAAConvexTessellator; curved paths use the segment /
// QuadEdgeEffect pipeline above.
// NOTE(review): this chunk is missing lines inside the class (Geometry fields,
// early returns after the SkDebugf error logs, brace closers); comments below
// only describe what the visible lines show.
742 class AAConvexPathBatch : public GrVertexBatch {
744     DEFINE_BATCH_CLASS_ID
747         SkMatrix fViewMatrix;
751     static GrDrawBatch* Create(const Geometry& geometry) { return new AAConvexPathBatch(geometry); }
753     const char* name() const override { return "AAConvexBatch"; }
755     void computePipelineOptimizations(GrInitInvariantOutput* color,
756                                       GrInitInvariantOutput* coverage,
757                                       GrBatchToXPOverrides* overrides) const override {
758         // When this is called on a batch, there is only one geometry bundle
759         color->setKnownFourComponents(fGeoData[0].fColor);
760         coverage->setUnknownSingleComponent();
761         overrides->fUsePLSDstRead = false;
// Caches pipeline-derived flags (color/coverage usage, tweakability) and
// whether the first path is line-only, which selects the draw path later.
765     void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
766         // Handle any color overrides
767         if (!overrides.readsColor()) {
768             fGeoData[0].fColor = GrColor_ILLEGAL;
770         overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
772         // setup batch properties
773         fBatch.fColorIgnored = !overrides.readsColor();
774         fBatch.fColor = fGeoData[0].fColor;
775         fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
776         fBatch.fCoverageIgnored = !overrides.readsCoverage();
777         fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
778         fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
// Fast path for paths containing only line segments: CPU tessellation with
// per-vertex coverage instead of the quad-edge shader.
781     void prepareLinesOnlyDraws(Target* target) const {
782         bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
784         // Setup GrGeometryProcessor
785         SkAutoTUnref<const GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
787                                                                   this->usesLocalCoords(),
788                                                                   this->coverageIgnored()));
790             SkDebugf("Could not create GrGeometryProcessor\n");
794         target->initDraw(gp, this->pipeline());
796         size_t vertexStride = gp->getVertexStride();
798         SkASSERT(canTweakAlphaForCoverage ?
799                  vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
800                  vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
802         GrAAConvexTessellator tess;
804         int instanceCount = fGeoData.count();
806         for (int i = 0; i < instanceCount; i++) {
809             const Geometry& args = fGeoData[i];
// A path the tessellator cannot handle is skipped, not failed.
811             if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
815             const GrVertexBuffer* vertexBuffer;
818             void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
821                 SkDebugf("Could not allocate vertices\n");
825             const GrIndexBuffer* indexBuffer;
828             uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
830                 SkDebugf("Could not allocate indices\n");
834             extract_verts(tess, verts, vertexStride, args.fColor, idxs, canTweakAlphaForCoverage);
837             info.initIndexed(kTriangles_GrPrimitiveType,
838                              vertexBuffer, indexBuffer,
839                              firstVertex, firstIndex,
840                              tess.numPts(), tess.numIndices());
// Main draw preparation: dispatch to the lines-only path when possible,
// otherwise build segments and quad-edge geometry per path instance.
845     void onPrepareDraws(Target* target) const override {
846 #ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
847         if (this->linesOnly()) {
848             this->prepareLinesOnlyDraws(target);
853         int instanceCount = fGeoData.count();
856         if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
857             SkDebugf("Could not invert viewmatrix\n");
861         // Setup GrGeometryProcessor
862         SkAutoTUnref<GrGeometryProcessor> quadProcessor(
863                 QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoords()));
865         target->initDraw(quadProcessor, this->pipeline());
867         // TODO generate all segments for all paths and use one vertex buffer
868         for (int i = 0; i < instanceCount; i++) {
869             const Geometry& args = fGeoData[i];
871             // We use the fact that SkPath::transform path does subdivision based on
872             // perspective. Otherwise, we apply the view matrix when copying to the
873             // segment representation.
874             const SkMatrix* viewMatrix = &args.fViewMatrix;
876             // We avoid initializing the path unless we have to
877             const SkPath* pathPtr = &args.fPath;
878             SkTLazy<SkPath> tmpPath;
// Perspective matrices are baked into the path up front; the copy is marked
// volatile so it is not cached.
879             if (viewMatrix->hasPerspective()) {
880                 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
881                 tmpPathPtr->setIsVolatile(true);
882                 tmpPathPtr->transform(*viewMatrix);
883                 viewMatrix = &SkMatrix::I();
884                 pathPtr = tmpPathPtr;
890             kPreallocSegmentCnt = 512 / sizeof(Segment),
891             kPreallocDrawCnt = 4,
893             SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
896             if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
901             const GrVertexBuffer* vertexBuffer;
904             size_t vertexStride = quadProcessor->getVertexStride();
905             QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
906                     vertexStride, vertexCount, &vertexBuffer, &firstVertex));
909                 SkDebugf("Could not allocate vertices\n");
913             const GrIndexBuffer* indexBuffer;
916             uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
918                 SkDebugf("Could not allocate indices\n");
// Geometry may have been split into multiple Draws to respect the 16-bit
// index limit; issue each as a separate indexed draw.
922             SkSTArray<kPreallocDrawCnt, Draw, true> draws;
923             create_vertices(segments, fanPt, &draws, verts, idxs);
927             for (int j = 0; j < draws.count(); ++j) {
928                 const Draw& draw = draws[j];
929                 vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
930                                      firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
931                 target->draw(vertices);
932                 firstVertex += draw.fVertexCnt;
933                 firstIndex += draw.fIndexCnt;
938     SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
// Batch bounds are the path bounds mapped through its view matrix.
940     AAConvexPathBatch(const Geometry& geometry) : INHERITED(ClassID()) {
941         fGeoData.push_back(geometry);
944         fBounds = geometry.fPath.getBounds();
945         geometry.fViewMatrix.mapRect(&fBounds);
// Batching: merge only when pipelines, colors, local-coord matrices, and
// line-only-ness agree; otherwise the draws cannot share a processor.
948     bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
949         AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
950         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
951                                     that->bounds(), caps)) {
955         if (this->color() != that->color()) {
959         SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
960         if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
964         if (this->linesOnly() != that->linesOnly()) {
968         // In the event of two batches, one who can tweak, one who cannot, we just fall back to
970         if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
971             fBatch.fCanTweakAlphaForCoverage = false;
974         fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
975         this->joinBounds(that->bounds());
979     GrColor color() const { return fBatch.fColor; }
980     bool linesOnly() const { return fBatch.fLinesOnly; }
981     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
982     bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
983     const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
984     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
986     struct BatchTracker {
988         bool fUsesLocalCoords;
990         bool fCoverageIgnored;
992         bool fCanTweakAlphaForCoverage;
996     SkSTArray<1, Geometry, true> fGeoData;
998     typedef GrVertexBatch INHERITED;
// Entry point: wraps the path/color/view-matrix into a Geometry, creates the
// batch, and hands it to the draw target. Empty paths are rejected early
// (the return in that branch is among this chunk's missing lines).
1001 bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
1002     if (args.fPath->isEmpty()) {
1006     AAConvexPathBatch::Geometry geometry;
1007     geometry.fColor = args.fColor;
1008     geometry.fViewMatrix = *args.fViewMatrix;
1009     geometry.fPath = *args.fPath;
1011     SkAutoTUnref<GrDrawBatch> batch(AAConvexPathBatch::Create(geometry));
1012     args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
1018 ///////////////////////////////////////////////////////////////////////////////////////////////////
1020 #ifdef GR_TEST_UTILS
1022 DRAW_BATCH_TEST_DEFINE(AAConvexPathBatch) {
1023 AAConvexPathBatch::Geometry geometry;
1024 geometry.fColor = GrRandomColor(random);
1025 geometry.fViewMatrix = GrTest::TestMatrixInvertible(random);
1026 geometry.fPath = GrTest::TestPathConvex(random);
1028 return AAConvexPathBatch::Create(geometry);