3 * Copyright 2012 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
9 #include "GrAAConvexPathRenderer.h"
12 #include "GrBatchTarget.h"
13 #include "GrBufferAllocPool.h"
14 #include "GrContext.h"
15 #include "GrDrawTargetCaps.h"
16 #include "GrGeometryProcessor.h"
17 #include "GrInvariantOutput.h"
18 #include "GrPathUtils.h"
19 #include "GrProcessor.h"
20 #include "GrPipelineBuilder.h"
21 #include "SkGeometry.h"
23 #include "SkStrokeRec.h"
24 #include "SkTraceEvent.h"
25 #include "gl/GrGLProcessor.h"
26 #include "gl/GrGLSL.h"
27 #include "gl/GrGLGeometryProcessor.h"
28 #include "gl/builders/GrGLProgramBuilder.h"
// Default constructor. NOTE(review): this listing is truncated — the embedded
// original line numbers jump past 30, so the constructor body and closing
// brace are not visible here (presumably an empty body — confirm in the repo).
30 GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
// Fragments of the file-local `Segment` struct: one path segment (a line or a
// quad) plus per-point outward normals and a corner bisector (fMid).
// NOTE(review): the struct declaration line, the enum definition (kLine/kQuad),
// the fPts/fNorms arrays, and the method bodies are missing from this listing
// (embedded line numbers skip) — only the asserts and signatures survive.
35 // These enum values are assumed in member functions below.
40 // line uses one pt, quad uses 2 pts
42 // normal to edge ending at each pt
44 // is the corner where the previous segment meets this segment
45 // sharp. If so, fMid is a normalized bisector facing outward.
// The asserts below pin the enum encoding (kLine==0, kQuad==1) that the
// index arithmetic in endPt()/endNorm() depends on.
49 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
52 const SkPoint& endPt() const {
53 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
56 const SkPoint& endNorm() const {
57 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
// Growable array of segments; `true` selects the memcpy-able (POD) variant.
62 typedef SkTArray<Segment, true> SegmentArray;
// Computes the centroid of the polygon formed by the segments' end points,
// via the shoelace (signed-area) formula, writing the result to *c. If the
// signed area is ~0 (degenerate polygon) it falls back to the plain average
// of the end points. NOTE(review): this listing is truncated (embedded line
// numbers skip) — declarations of `p0`, `pi`, `area`, the area accumulation,
// and several closing braces are not visible here.
64 static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
66 SkPoint center = {0, 0};
67 int count = segments.count();
70 // We translate the polygon so that the first point is at the origin.
71 // This avoids some precision issues with small area polygons far away
73 p0 = segments[0].endPt();
76 // the first and last iteration of the below loop would compute
77 // zeros since the starting / ending point is (0,0). So instead we start
78 // at i=1 and make the last iteration i=count-2.
79 pj = segments[1].endPt() - p0;
80 for (int i = 1; i < count - 1; ++i) {
82 const SkPoint pj = segments[i + 1].endPt() - p0;
// Cross product term of the shoelace formula for edge (pi, pj).
84 SkScalar t = SkScalarMul(pi.fX, pj.fY) - SkScalarMul(pj.fX, pi.fY);
86 center.fX += (pi.fX + pj.fX) * t;
87 center.fY += (pi.fY + pj.fY) * t;
91 // If the poly has no area then we instead return the average of
93 if (SkScalarNearlyZero(area)) {
96 for (int i = 0; i < count; ++i) {
97 const SkPoint& pt = segments[i].endPt();
101 SkScalar denom = SK_Scalar1 / count;
// Normal case: divide the accumulated sums by the area term
// (presumably 6*area per the standard centroid formula — the
// scaling line is not visible in this listing; confirm in repo).
106 area = SkScalarDiv(SK_Scalar1, area);
107 center.fX = SkScalarMul(center.fX, area);
108 center.fY = SkScalarMul(center.fY, area);
109 // undo the translate of p0 to the origin.
112 SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
// For a closed segment loop: computes the fan point (centroid), per-point
// outward edge normals, and corner bisectors (fMid) where adjacent segments
// meet. The normal side is chosen from the path direction so normals face
// outward. NOTE(review): listing is truncated — the parameter list
// (fanPt, vCount, iCount outputs), the `else`, and the per-segment
// vertex/index counting code are not visible here.
115 static void compute_vectors(SegmentArray* segments,
117 SkPath::Direction dir,
120 center_of_mass(*segments, fanPt);
121 int count = segments->count();
123 // Make the normals point towards the outside
124 SkPoint::Side normSide;
125 if (dir == SkPath::kCCW_Direction) {
126 normSide = SkPoint::kRight_Side;
128 normSide = SkPoint::kLeft_Side;
133 // compute normals at all points
134 for (int a = 0; a < count; ++a) {
135 Segment& sega = (*segments)[a];
// b is the next segment; the loop wraps so the last segment's end
// feeds the first segment's start.
136 int b = (a + 1) % count;
137 Segment& segb = (*segments)[b];
139 const SkPoint* prevPt = &sega.endPt();
140 int n = segb.countPoints();
141 for (int p = 0; p < n; ++p) {
// Normal = normalized edge vector rotated 90 degrees to normSide.
142 segb.fNorms[p] = segb.fPts[p] - *prevPt;
143 segb.fNorms[p].normalize();
144 segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
145 prevPt = &segb.fPts[p];
147 if (Segment::kLine == segb.fType) {
156 // compute mid-vectors where segments meet. TODO: Detect shallow corners
157 // and leave out the wedges and close gaps by stitching segments together.
158 for (int a = 0; a < count; ++a) {
159 const Segment& sega = (*segments)[a];
160 int b = (a + 1) % count;
161 Segment& segb = (*segments)[b];
// Bisector of the two adjacent edge normals at the shared corner.
162 segb.fMid = segb.fNorms[0] + sega.endNorm();
163 segb.fMid.normalize();
// State machine used to detect paths that collapse to (nearly) a point or a
// line, which this renderer handles poorly (see comment in get_segments).
// NOTE(review): the Stage enum, fFirstPoint, fLineC, and closing brace are
// missing from this truncated listing.
170 struct DegenerateTestData {
171 DegenerateTestData() { fStage = kInitial; }
172 bool isDegenerate() const { return kNonDegenerate != fStage; }
180 SkVector fLineNormal;
// Distance tolerances for the degeneracy test: points closer than 1/16
// (squared: kCloseSqd) are treated as coincident/collinear.
184 static const SkScalar kClose = (SK_Scalar1 / 16);
185 static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
// Advances the degeneracy state machine with one more path point:
// kInitial -> kPoint (record first point); kPoint -> kLine once a point is
// far enough away to define a line (normal + offset C stored); kLine ->
// kNonDegenerate once a point lies off that line by more than kClose.
// NOTE(review): `break;` statements and the `default:` label are missing from
// this truncated listing — the original presumably breaks after each case
// (confirm in repo); SkFAIL is the unreachable-default guard.
187 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
188 switch (data->fStage) {
189 case DegenerateTestData::kInitial:
190 data->fFirstPoint = pt;
191 data->fStage = DegenerateTestData::kPoint;
193 case DegenerateTestData::kPoint:
194 if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
// Implicit line: dot(fLineNormal, x) + fLineC == 0.
195 data->fLineNormal = pt - data->fFirstPoint;
196 data->fLineNormal.normalize();
197 data->fLineNormal.setOrthog(data->fLineNormal);
198 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
199 data->fStage = DegenerateTestData::kLine;
202 case DegenerateTestData::kLine:
// Signed distance of pt from the recorded line.
203 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
204 data->fStage = DegenerateTestData::kNonDegenerate;
206 case DegenerateTestData::kNonDegenerate:
209 SkFAIL("Unexpected degenerate test stage.");
// Determines the path's winding direction in device space: computes the
// source-space direction, then flips it if the matrix m reverses orientation
// (negative 2x2 determinant). Returns false when the direction cannot be
// computed (degenerate path). NOTE(review): the early-return body, the
// determinant-sign test, and `return true;` are missing from this listing.
213 static inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPath::Direction* dir) {
214 if (!path.cheapComputeDirection(dir)) {
217 // check whether m reverses the orientation
218 SkASSERT(!m.hasPerspective());
219 SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
220 SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
222 *dir = SkPath::OppositeDirection(*dir);
// Appends a kLine segment ending at pt. A line segment stores only its end
// point (the start is the previous segment's end point).
227 static inline void add_line_to_segment(const SkPoint& pt,
228 SegmentArray* segments) {
229 segments->push_back();
230 segments->back().fType = Segment::kLine;
231 segments->back().fPts[0] = pt;
// Appends a kQuad segment for control points pts[0..2]. Quads whose control
// point nearly coincides with an end point (within kCloseSqd) are demoted to
// a line to pts[2] — unless the quad is a closed sliver (pts[0]==pts[2]), in
// which case it is dropped entirely. NOTE(review): the `} else {` joining the
// two branches is missing from this truncated listing.
234 static inline void add_quad_segment(const SkPoint pts[3],
235 SegmentArray* segments) {
236 if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
237 if (pts[0] != pts[2]) {
238 add_line_to_segment(pts[2], segments);
// Quad stores its off-curve control point and end point; the start
// point is the previous segment's end point.
241 segments->push_back();
242 segments->back().fType = Segment::kQuad;
243 segments->back().fPts[0] = pts[1];
244 segments->back().fPts[1] = pts[2];
// Converts a cubic to a chain of quads (preserving convexity w.r.t. dir,
// tolerance SK_Scalar1) and appends each as a quad segment. convertCubicToQuads
// emits 3 points per quad, hence the stride-3 loop.
248 static inline void add_cubic_segments(const SkPoint pts[4],
249 SkPath::Direction dir,
250 SegmentArray* segments) {
251 SkSTArray<15, SkPoint, true> quads;
252 GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads);
253 int count = quads.count();
254 for (int q = 0; q < count; q += 3) {
255 add_quad_segment(&quads[q], segments);
// Walks the (closed) path with SkPath::Iter, mapping points through m and
// converting each verb into line/quad segments; conics and cubics are
// approximated with quads. Simultaneously feeds every mapped point into the
// degeneracy state machine; a degenerate (point/line-like) path is rejected.
// On kDone, computes fan point, normals, and vertex/index counts via
// compute_vectors. Returns false for degenerate/undirectable paths.
// NOTE(review): this listing is truncated (embedded line numbers skip) — the
// parameter list tail, the verb-loop `for(;;)`/`switch`, `break;`s, mapPoints
// calls for some verbs, and several returns are not visible here. Fixed in
// this revision: nine call sites read `update_degenerate_test(°enerateData,...`,
// an HTML-entity mangling of `&degenerateData` (`&deg` -> `°`); restored the
// intended address-of argument.
259 static bool get_segments(const SkPath& path,
261 SegmentArray* segments,
265 SkPath::Iter iter(path, true);
266 // This renderer over-emphasizes very thin path regions. We use the distance
267 // to the path from the sample to compute coverage. Every pixel intersected
268 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
269 // notice that the sample may be close to a very thin area of the path and
270 // thus should be very light. This is particularly egregious for degenerate
271 // line paths. We detect paths that are very close to a line (zero area) and
273 DegenerateTestData degenerateData;
274 SkPath::Direction dir;
275 // get_direction can fail for some degenerate paths.
276 if (!get_direction(path, m, &dir)) {
282 SkPath::Verb verb = iter.next(pts);
284 case SkPath::kMove_Verb:
286 update_degenerate_test(&degenerateData, pts[0]);
288 case SkPath::kLine_Verb: {
289 m.mapPoints(&pts[1], 1);
290 update_degenerate_test(&degenerateData, pts[1]);
291 add_line_to_segment(pts[1], segments);
294 case SkPath::kQuad_Verb:
296 update_degenerate_test(&degenerateData, pts[1]);
297 update_degenerate_test(&degenerateData, pts[2]);
298 add_quad_segment(pts, segments);
300 case SkPath::kConic_Verb: {
302 SkScalar weight = iter.conicWeight();
303 SkAutoConicToQuads converter;
// 0.5f is the conic->quad subdivision tolerance.
304 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
305 for (int i = 0; i < converter.countQuads(); ++i) {
306 update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
307 update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
308 add_quad_segment(quadPts + 2*i, segments);
312 case SkPath::kCubic_Verb: {
314 update_degenerate_test(&degenerateData, pts[1]);
315 update_degenerate_test(&degenerateData, pts[2]);
316 update_degenerate_test(&degenerateData, pts[3]);
317 add_cubic_segments(pts, dir, segments);
320 case SkPath::kDone_Verb:
321 if (degenerateData.isDegenerate()) {
324 compute_vectors(segments, fanPt, dir, vCount, iCount);
// Fragment of the file-local `Draw` struct: one draw call's vertex/index
// counts, used to split geometry when 16-bit indices would overflow.
// NOTE(review): the struct declaration, fields, and closing brace are missing
// from this truncated listing.
341 Draw() : fVertexCnt(0), fIndexCnt(0) {}
346 typedef SkTArray<Draw, true> DrawArray;
// Fills the caller-allocated vertex/index buffers from the segment list:
// per corner a 4-vertex anti-aliased wedge; per line segment a fan triangle
// plus a degenerate quad edge (v = signed distance); per quad segment a
// 6-vertex patch with canonical (u,v) coords from QuadUVMatrix plus two
// half-plane distances fD0/fD1 that trim the infinite quad. Splits into
// multiple Draw records whenever 16-bit index space would overflow.
// NOTE(review): listing is truncated — the trailing parameters (DrawArray*,
// QuadVertex*, uint16_t*), the `else` arms, the `*v += ...`/`*i += ...`
// count updates, and closing braces are not visible here; code kept verbatim.
348 static void create_vertices(const SegmentArray& segments,
349 const SkPoint& fanPt,
353 Draw* draw = &draws->push_back();
354 // alias just to make vert/index assignments easier to read.
355 int* v = &draw->fVertexCnt;
356 int* i = &draw->fIndexCnt;
358 int count = segments.count();
359 for (int a = 0; a < count; ++a) {
360 const Segment& sega = segments[a];
361 int b = (a + 1) % count;
362 const Segment& segb = segments[b];
364 // Check whether adding the verts for this segment to the current draw would cause index
365 // values to overflow.
367 if (Segment::kLine == segb.fType) {
372 if (draw->fVertexCnt + vCount > (1 << 16)) {
// Start a new Draw record so indices stay within uint16_t.
375 draw = &draws->push_back();
376 v = &draw->fVertexCnt;
377 i = &draw->fIndexCnt;
// --- corner wedge: 4 verts / 2 tris around the shared corner point ---
380 // FIXME: These tris are inset in the 1 unit arc around the corner
381 verts[*v + 0].fPos = sega.endPt();
382 verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
383 verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
384 verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
385 verts[*v + 0].fUV.set(0,0);
386 verts[*v + 1].fUV.set(0,-SK_Scalar1);
387 verts[*v + 2].fUV.set(0,-SK_Scalar1);
388 verts[*v + 3].fUV.set(0,-SK_Scalar1);
// fD0/fD1 = -1 marks "inside" so the shader's half-plane trim passes.
389 verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
390 verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
391 verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
392 verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
394 idxs[*i + 0] = *v + 0;
395 idxs[*i + 1] = *v + 2;
396 idxs[*i + 2] = *v + 1;
397 idxs[*i + 3] = *v + 0;
398 idxs[*i + 4] = *v + 3;
399 idxs[*i + 5] = *v + 2;
// --- line segment: interior fan tri + outset edge quad ---
404 if (Segment::kLine == segb.fType) {
405 verts[*v + 0].fPos = fanPt;
406 verts[*v + 1].fPos = sega.endPt();
407 verts[*v + 2].fPos = segb.fPts[0];
409 verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
410 verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];
412 // we draw the line edge as a degenerate quad (u is 0, v is the
413 // signed distance to the edge)
414 SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
416 verts[*v + 0].fUV.set(0, dist);
417 verts[*v + 1].fUV.set(0, 0);
418 verts[*v + 2].fUV.set(0, 0);
419 verts[*v + 3].fUV.set(0, -SK_Scalar1);
420 verts[*v + 4].fUV.set(0, -SK_Scalar1);
422 verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
423 verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
424 verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
425 verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
426 verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;
428 idxs[*i + 0] = *v + 0;
429 idxs[*i + 1] = *v + 2;
430 idxs[*i + 2] = *v + 1;
432 idxs[*i + 3] = *v + 3;
433 idxs[*i + 4] = *v + 1;
434 idxs[*i + 5] = *v + 2;
436 idxs[*i + 6] = *v + 4;
437 idxs[*i + 7] = *v + 3;
438 idxs[*i + 8] = *v + 2;
// --- quad segment: 6-vert patch with canonical quad UVs ---
443 SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
445 SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
448 verts[*v + 0].fPos = fanPt;
449 verts[*v + 1].fPos = qpts[0];
450 verts[*v + 2].fPos = qpts[2];
451 verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
452 verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
453 verts[*v + 5].fPos = qpts[1] + midVec;
// fD0 = signed distance from the segment-start edge line; outset
// verts get a huge negative value so the trim never clips them.
455 SkScalar c = segb.fNorms[0].dot(qpts[0]);
456 verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
457 verts[*v + 1].fD0 = 0.f;
458 verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
459 verts[*v + 3].fD0 = -SK_ScalarMax/100;
460 verts[*v + 4].fD0 = -SK_ScalarMax/100;
461 verts[*v + 5].fD0 = -SK_ScalarMax/100;
// fD1 = same, measured from the segment-end edge line.
463 c = segb.fNorms[1].dot(qpts[2]);
464 verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
465 verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
466 verts[*v + 2].fD1 = 0.f;
467 verts[*v + 3].fD1 = -SK_ScalarMax/100;
468 verts[*v + 4].fD1 = -SK_ScalarMax/100;
469 verts[*v + 5].fD1 = -SK_ScalarMax/100;
// Writes canonical-space (u, v) into each vertex's fUV.
471 GrPathUtils::QuadUVMatrix toUV(qpts);
472 toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);
474 idxs[*i + 0] = *v + 3;
475 idxs[*i + 1] = *v + 1;
476 idxs[*i + 2] = *v + 2;
477 idxs[*i + 3] = *v + 4;
478 idxs[*i + 4] = *v + 3;
479 idxs[*i + 5] = *v + 2;
481 idxs[*i + 6] = *v + 5;
482 idxs[*i + 7] = *v + 3;
483 idxs[*i + 8] = *v + 4;
485 idxs[*i + 9] = *v + 0;
486 idxs[*i + 10] = *v + 2;
487 idxs[*i + 11] = *v + 1;
495 ///////////////////////////////////////////////////////////////////////////////
498 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
499 * two components of the vertex attribute. Coverage is based on signed
500 * distance with negative being inside, positive outside. The edge is specified in
501 * window space (y-down). If either the third or fourth component of the interpolated
502 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
503 * attempt to trim to a portion of the infinite quad.
504 * Requires shader derivative instruction support.
// Geometry processor that rasterizes the quad-edge vertex attributes above:
// coverage from signed distance in canonical quad space (u^2 - v), with the
// z/w varying components providing half-plane trimming of the infinite quad.
// Requires shader derivative support (see onEmitCode). NOTE(review): this
// listing is truncated — access specifiers, several closing braces, the
// fColor member, onIsEqual's body, and some argument tails are not visible.
507 class QuadEdgeEffect : public GrGeometryProcessor {
510 static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix) {
511 return SkNEW_ARGS(QuadEdgeEffect, (color, localMatrix));
514 virtual ~QuadEdgeEffect() {}
516 const char* name() const SK_OVERRIDE { return "QuadEdge"; }
518 const Attribute* inPosition() const { return fInPosition; }
519 const Attribute* inQuadEdge() const { return fInQuadEdge; }
// GL backend implementation: emits the vertex/fragment shader code.
521 class GLProcessor : public GrGLGeometryProcessor {
523 GLProcessor(const GrGeometryProcessor&,
524 const GrBatchTracker&)
525 : fColor(GrColor_ILLEGAL) {}
527 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) SK_OVERRIDE {
528 const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
529 GrGLGPBuilder* pb = args.fPB;
530 GrGLVertexBuilder* vsBuilder = pb->getVertexShaderBuilder();
533 vsBuilder->emitAttributes(qe);
// Pass the 4-component quad-edge attribute through to the FS.
535 GrGLVertToFrag v(kVec4f_GrSLType);
536 args.fPB->addVarying("QuadEdge", &v);
537 vsBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);
539 const BatchTracker& local = args.fBT.cast<BatchTracker>();
541 // Setup pass through color
542 this->setupColorPassThrough(pb, local.fInputColorType, args.fOutputColor, NULL,
546 this->setupPosition(pb, gpArgs, qe.inPosition()->fName, qe.viewMatrix());
549 this->emitTransforms(args.fPB, gpArgs->fPositionVar, qe.inPosition()->fName,
550 qe.localMatrix(), args.fTransformsIn, args.fTransformsOut);
552 GrGLGPFragmentBuilder* fsBuilder = args.fPB->getFragmentShaderBuilder();
// Hard requirement: dFdx/dFdy below need standard derivatives.
554 SkAssertResult(fsBuilder->enableFeature(
555 GrGLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
556 fsBuilder->codeAppendf("float edgeAlpha;");
558 // keep the derivative instructions outside the conditional
559 fsBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
560 fsBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
// z/w > 0 means outside both trimming half-planes: simple ramp.
561 fsBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
562 // today we know z and w are in device space. We could use derivatives
563 fsBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
565 fsBuilder->codeAppendf ("} else {");
// Gradient of the implicit curve u^2 - v, for distance normalization.
566 fsBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
567 " 2.0*%s.x*duvdy.x - duvdy.y);",
569 fsBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
571 fsBuilder->codeAppendf("edgeAlpha = "
572 "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");
574 fsBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
// Program cache key: color-input type, local-matrix perspective bit,
// and the position key from the view matrix.
577 static inline void GenKey(const GrGeometryProcessor& gp,
578 const GrBatchTracker& bt,
580 GrProcessorKeyBuilder* b) {
581 const BatchTracker& local = bt.cast<BatchTracker>();
582 uint32_t key = local.fInputColorType << 16;
583 key |= local.fUsesLocalCoords && gp.localMatrix().hasPerspective() ? 0x1 : 0x0;
584 key |= ComputePosKey(gp.viewMatrix()) << 1;
// Uploads uniforms; caches the last color to skip redundant set4fv.
588 virtual void setData(const GrGLProgramDataManager& pdman,
589 const GrPrimitiveProcessor& gp,
590 const GrBatchTracker& bt) SK_OVERRIDE {
591 this->setUniformViewMatrix(pdman, gp.viewMatrix());
593 const BatchTracker& local = bt.cast<BatchTracker>();
594 if (kUniform_GrGPInput == local.fInputColorType && local.fColor != fColor) {
596 GrColorToRGBAFloat(local.fColor, c);
597 pdman.set4fv(fColorUniform, 1, c);
598 fColor = local.fColor;
604 UniformHandle fColorUniform;
606 typedef GrGLGeometryProcessor INHERITED;
609 virtual void getGLProcessorKey(const GrBatchTracker& bt,
610 const GrGLCaps& caps,
611 GrProcessorKeyBuilder* b) const SK_OVERRIDE {
612 GLProcessor::GenKey(*this, bt, caps, b);
615 virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
616 const GrGLCaps&) const SK_OVERRIDE {
617 return SkNEW_ARGS(GLProcessor, (*this, bt));
620 void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
621 BatchTracker* local = bt->cast<BatchTracker>();
622 local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
623 local->fUsesLocalCoords = init.fUsesLocalCoords;
// Batching predicate: two instances can share a program iff local
// matrices and color outputs are compatible.
626 bool onCanMakeEqual(const GrBatchTracker& m,
627 const GrGeometryProcessor& that,
628 const GrBatchTracker& t) const SK_OVERRIDE {
629 const BatchTracker& mine = m.cast<BatchTracker>();
630 const BatchTracker& theirs = t.cast<BatchTracker>();
631 return CanCombineLocalMatrices(*this, mine.fUsesLocalCoords,
632 that, theirs.fUsesLocalCoords) &&
633 CanCombineOutput(mine.fInputColorType, mine.fColor,
634 theirs.fInputColorType, theirs.fColor);
// Private ctor: registers the position (vec2) and quad-edge (vec4)
// vertex attributes; identity view matrix baked in.
638 QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix)
639 : INHERITED(color, SkMatrix::I(), localMatrix) {
640 this->initClassID<QuadEdgeEffect>();
641 fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
642 fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
645 bool onIsEqual(const GrGeometryProcessor& other) const SK_OVERRIDE {
649 void onGetInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
650 out->setUnknownSingleComponent();
653 struct BatchTracker {
654 GrGPInput fInputColorType;
656 bool fUsesLocalCoords;
659 const Attribute* fInPosition;
660 const Attribute* fInQuadEdge;
662 GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
664 typedef GrGeometryProcessor INHERITED;
667 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
// Unit-test factory: returns a randomly configured effect, or NULL on GPUs
// without shader derivative support (the effect cannot run there).
// NOTE(review): one parameter line is missing from this truncated listing.
669 GrGeometryProcessor* QuadEdgeEffect::TestCreate(SkRandom* random,
671 const GrDrawTargetCaps& caps,
673 // Doesn't work without derivative instructions.
674 return caps.shaderDerivativeSupport() ?
675 QuadEdgeEffect::Create(GrRandomColor(random),
676 GrProcessorUnitTest::TestMatrix(random)) : NULL;
679 ///////////////////////////////////////////////////////////////////////////////
// Accepts only anti-aliased, filled, non-inverse, convex paths, and only on
// hardware with shader derivative support (required by QuadEdgeEffect).
// NOTE(review): the `const SkPath& path` parameter line is missing from this
// truncated listing though `path` is used below.
681 bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target,
682 const GrPipelineBuilder*,
683 const SkMatrix& viewMatrix,
685 const SkStrokeRec& stroke,
686 bool antiAlias) const {
687 return (target->caps()->shaderDerivativeSupport() && antiAlias &&
688 stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex());
// Batch that tessellates one or more convex paths into QuadEdgeEffect
// geometry and issues the draws. NOTE(review): this listing is truncated —
// the Geometry struct header (fColor/fPath fields), access specifiers,
// several locals (invert, init, fanPt, vertexCount/indexCount, firstVertex/
// firstIndex, vOffset), early returns after the SkDebugf error paths, and
// closing braces are not visible here; code kept verbatim.
691 class AAConvexPathBatch : public GrBatch {
695 SkMatrix fViewMatrix;
699 static GrBatch* Create(const Geometry& geometry) {
700 return SkNEW_ARGS(AAConvexPathBatch, (geometry));
703 const char* name() const SK_OVERRIDE { return "AAConvexBatch"; }
705 void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
706 // When this is called on a batch, there is only one geometry bundle
707 out->setKnownFourComponents(fGeoData[0].fColor);
709 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
710 out->setUnknownSingleComponent();
713 void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
714 // Handle any color overrides
715 if (init.fColorIgnored) {
716 fGeoData[0].fColor = GrColor_ILLEGAL;
717 } else if (GrColor_ILLEGAL != init.fOverrideColor) {
718 fGeoData[0].fColor = init.fOverrideColor;
721 // setup batch properties
722 fBatch.fColorIgnored = init.fColorIgnored;
723 fBatch.fColor = fGeoData[0].fColor;
724 fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
725 fBatch.fCoverageIgnored = init.fCoverageIgnored;
// Core work: per path, build segments, allocate pooled vertex/index
// space, fill it via create_vertices, and emit one draw per Draw record.
728 void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
729 int instanceCount = fGeoData.count();
732 if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
733 SkDebugf("Could not invert viewmatrix\n");
737 // Setup GrGeometryProcessor
738 SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(this->color(),
741 batchTarget->initDraw(quadProcessor, pipeline);
743 // TODO remove this when batch is everywhere
745 init.fColorIgnored = fBatch.fColorIgnored;
746 init.fOverrideColor = GrColor_ILLEGAL;
747 init.fCoverageIgnored = fBatch.fCoverageIgnored;
748 init.fUsesLocalCoords = this->usesLocalCoords();
749 quadProcessor->initBatchTracker(batchTarget->currentBatchTracker(), init);
751 // TODO generate all segments for all paths and use one vertex buffer
752 for (int i = 0; i < instanceCount; i++) {
753 Geometry& args = fGeoData[i];
755 // We use the fact that SkPath::transform path does subdivision based on
756 // perspective. Otherwise, we apply the view matrix when copying to the
757 // segment representation.
758 const SkMatrix* viewMatrix = &args.fViewMatrix;
759 if (viewMatrix->hasPerspective()) {
760 args.fPath.transform(*viewMatrix);
761 viewMatrix = &SkMatrix::I();
767 kPreallocSegmentCnt = 512 / sizeof(Segment),
768 kPreallocDrawCnt = 4,
770 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
// Rejects degenerate paths; fills segments/fanPt/counts.
773 if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount,
778 const GrVertexBuffer* vertexBuffer;
781 size_t vertexStride = quadProcessor->getVertexStride();
782 void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
788 SkDebugf("Could not allocate vertices\n");
792 const GrIndexBuffer* indexBuffer;
795 void *indices = batchTarget->indexPool()->makeSpace(indexCount,
800 SkDebugf("Could not allocate indices\n");
804 QuadVertex* verts = reinterpret_cast<QuadVertex*>(vertices);
805 uint16_t* idxs = reinterpret_cast<uint16_t*>(indices);
807 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
808 create_vertices(segments, fanPt, &draws, verts, idxs);
810 GrDrawTarget::DrawInfo info;
811 info.setVertexBuffer(vertexBuffer);
812 info.setIndexBuffer(indexBuffer);
813 info.setPrimitiveType(kTriangles_GrPrimitiveType);
814 info.setStartIndex(firstIndex);
// One draw per Draw record (split earlier for 16-bit index range).
817 for (int i = 0; i < draws.count(); ++i) {
818 const Draw& draw = draws[i];
819 info.setStartVertex(vOffset + firstVertex);
820 info.setVertexCount(draw.fVertexCnt);
821 info.setIndexCount(draw.fIndexCnt);
822 batchTarget->draw(info);
823 vOffset += draw.fVertexCnt;
828 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
831 AAConvexPathBatch(const Geometry& geometry) {
832 this->initClassID<AAConvexPathBatch>();
833 fGeoData.push_back(geometry);
// Batches combine only when color matches and (if local coords are
// used) the view matrices agree.
836 bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
837 AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
839 if (this->color() != that->color()) {
843 SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
844 if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
848 fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
852 GrColor color() const { return fBatch.fColor; }
853 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
854 const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
856 struct BatchTracker {
858 bool fUsesLocalCoords;
860 bool fCoverageIgnored;
864 SkSTArray<1, Geometry, true> fGeoData;
// Entry point: wraps the path + color + view matrix in an AAConvexPathBatch
// and hands it to the draw target, with conservative device-space bounds
// (path bounds mapped and outset by 2: 1px for the AA outset + 1px margin).
// NOTE(review): parameter lines for `color`, `vm`, and `path`, the empty-path
// early return body, and the final `return true;` are missing from this
// truncated listing though those names are used below.
867 bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target,
868 GrPipelineBuilder* pipelineBuilder,
874 if (path.isEmpty()) {
878 // We outset our vertices one pixel and add one more pixel for precision.
879 // TODO create tighter bounds when we start reordering.
880 SkRect devRect = path.getBounds();
881 vm.mapRect(&devRect);
882 devRect.outset(2, 2);
884 AAConvexPathBatch::Geometry geometry;
885 geometry.fColor = color;
886 geometry.fViewMatrix = vm;
887 geometry.fPath = path;
889 SkAutoTUnref<GrBatch> batch(AAConvexPathBatch::Create(geometry));
890 target->drawBatch(pipelineBuilder, batch, &devRect);