317f9a0f8d037c93755b631ff9a0c15b4fa02ef1
[platform/upstream/libSkiaSharp.git] / src / gpu / GrAAConvexPathRenderer.cpp
1
2 /*
3  * Copyright 2012 Google Inc.
4  *
5  * Use of this source code is governed by a BSD-style license that can be
6  * found in the LICENSE file.
7  */
8
9 #include "GrAAConvexPathRenderer.h"
10
11 #include "GrBatch.h"
12 #include "GrBatchTarget.h"
13 #include "GrBufferAllocPool.h"
14 #include "GrContext.h"
15 #include "GrDrawTargetCaps.h"
16 #include "GrGeometryProcessor.h"
17 #include "GrInvariantOutput.h"
18 #include "GrPathUtils.h"
19 #include "GrProcessor.h"
20 #include "GrPipelineBuilder.h"
21 #include "SkGeometry.h"
22 #include "SkString.h"
23 #include "SkStrokeRec.h"
24 #include "SkTraceEvent.h"
25 #include "gl/GrGLProcessor.h"
26 #include "gl/GrGLSL.h"
27 #include "gl/GrGLGeometryProcessor.h"
28 #include "gl/builders/GrGLProgramBuilder.h"
29
// Default constructor. Nothing to initialize here; all per-draw state is
// built in canDrawPath()/onDrawPath() and the batch below.
GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
}
32
// One edge of the convex polygon being tessellated: either a line segment
// (one stored point) or a quadratic (two stored points). The segment's start
// point is implicit -- it is the previous segment's end point.
struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // is the corner where the previous segment meets this segment
    // sharp. If so, fMid is a normalized bisector facing outward.
    SkVector fMid;

    // Number of points stored for this segment: 1 for a line, 2 for a quad.
    int countPoints() {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    // The segment's final point (where the next segment begins).
    const SkPoint& endPt() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fPts[fType];
    };
    // The outward normal at the segment's final point.
    const SkPoint& endNorm() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    };
};

typedef SkTArray<Segment, true> SegmentArray;
63
64 static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
65     SkScalar area = 0;
66     SkPoint center = {0, 0};
67     int count = segments.count();
68     SkPoint p0 = {0, 0};
69     if (count > 2) {
70         // We translate the polygon so that the first point is at the origin.
71         // This avoids some precision issues with small area polygons far away
72         // from the origin.
73         p0 = segments[0].endPt();
74         SkPoint pi;
75         SkPoint pj;
76         // the first and last iteration of the below loop would compute
77         // zeros since the starting / ending point is (0,0). So instead we start
78         // at i=1 and make the last iteration i=count-2.
79         pj = segments[1].endPt() - p0;
80         for (int i = 1; i < count - 1; ++i) {
81             pi = pj;
82             const SkPoint pj = segments[i + 1].endPt() - p0;
83
84             SkScalar t = SkScalarMul(pi.fX, pj.fY) - SkScalarMul(pj.fX, pi.fY);
85             area += t;
86             center.fX += (pi.fX + pj.fX) * t;
87             center.fY += (pi.fY + pj.fY) * t;
88
89         }
90     }
91     // If the poly has no area then we instead return the average of
92     // its points.
93     if (SkScalarNearlyZero(area)) {
94         SkPoint avg;
95         avg.set(0, 0);
96         for (int i = 0; i < count; ++i) {
97             const SkPoint& pt = segments[i].endPt();
98             avg.fX += pt.fX;
99             avg.fY += pt.fY;
100         }
101         SkScalar denom = SK_Scalar1 / count;
102         avg.scale(denom);
103         *c = avg;
104     } else {
105         area *= 3;
106         area = SkScalarDiv(SK_Scalar1, area);
107         center.fX = SkScalarMul(center.fX, area);
108         center.fY = SkScalarMul(center.fY, area);
109         // undo the translate of p0 to the origin.
110         *c = center + p0;
111     }
112     SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
113 }
114
// Finishes segment setup after the path walk: computes the interior fan
// point (centroid), fills in each segment's outward edge normals (fNorms)
// and corner bisector (fMid), and totals the vertex/index counts the
// tessellation in create_vertices() will need. |dir| selects which orthogonal
// direction ("left" vs "right" of the edge) faces outward.
static void compute_vectors(SegmentArray* segments,
                            SkPoint* fanPt,
                            SkPath::Direction dir,
                            int* vCount,
                            int* iCount) {
    center_of_mass(*segments, fanPt);
    int count = segments->count();

    // Make the normals point towards the outside
    SkPoint::Side normSide;
    if (dir == SkPath::kCCW_Direction) {
        normSide = SkPoint::kRight_Side;
    } else {
        normSide = SkPoint::kLeft_Side;
    }

    *vCount = 0;
    *iCount = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        // Each stored point's normal is the unit orthogonal of the edge
        // arriving at that point from the previous point.
        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
        // Geometry budget per segment: 5 verts / 9 indices for a line,
        // 6 verts / 12 indices for a quad (must match create_vertices()).
        if (Segment::kLine == segb.fType) {
            *vCount += 5;
            *iCount += 9;
        } else {
            *vCount += 6;
            *iCount += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        // Normalized bisector of the adjacent edges' end/start normals,
        // facing outward; used for the 4-vert/6-index corner wedge.
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        *vCount += 4;
        *iCount += 6;
    }
}
169
// State machine used while walking the path to detect (near-)zero-area
// paths; see the comment in get_segments() for why those are rejected.
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    // The path is considered degenerate until we have seen a point
    // meaningfully off the line through the first two distinct points.
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,        // no points seen yet
        kPoint,          // every point so far coincides with fFirstPoint
        kLine,           // every point so far lies on (fLineNormal, fLineC)
        kNonDegenerate   // terminal: the path encloses some area
    }           fStage;
    SkPoint     fFirstPoint;
    SkVector    fLineNormal;  // unit normal of the candidate line
    SkScalar    fLineC;       // offset: fLineNormal.dot(p) + fLineC == 0 on the line
};
183
// Device-space tolerance: points within 1/16th of a unit are treated as
// coincident when testing for degenerate paths and collapsing tiny quads.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
186
187 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
188     switch (data->fStage) {
189         case DegenerateTestData::kInitial:
190             data->fFirstPoint = pt;
191             data->fStage = DegenerateTestData::kPoint;
192             break;
193         case DegenerateTestData::kPoint:
194             if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
195                 data->fLineNormal = pt - data->fFirstPoint;
196                 data->fLineNormal.normalize();
197                 data->fLineNormal.setOrthog(data->fLineNormal);
198                 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
199                 data->fStage = DegenerateTestData::kLine;
200             }
201             break;
202         case DegenerateTestData::kLine:
203             if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
204                 data->fStage = DegenerateTestData::kNonDegenerate;
205             }
206         case DegenerateTestData::kNonDegenerate:
207             break;
208         default:
209             SkFAIL("Unexpected degenerate test stage.");
210     }
211 }
212
213 static inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPath::Direction* dir) {
214     if (!path.cheapComputeDirection(dir)) {
215         return false;
216     }
217     // check whether m reverses the orientation
218     SkASSERT(!m.hasPerspective());
219     SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
220                       SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
221     if (det2x2 < 0) {
222         *dir = SkPath::OppositeDirection(*dir);
223     }
224     return true;
225 }
226
227 static inline void add_line_to_segment(const SkPoint& pt,
228                                        SegmentArray* segments) {
229     segments->push_back();
230     segments->back().fType = Segment::kLine;
231     segments->back().fPts[0] = pt;
232 }
233
234 static inline void add_quad_segment(const SkPoint pts[3],
235                                     SegmentArray* segments) {
236     if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
237         if (pts[0] != pts[2]) {
238             add_line_to_segment(pts[2], segments);
239         }
240     } else {
241         segments->push_back();
242         segments->back().fType = Segment::kQuad;
243         segments->back().fPts[0] = pts[1];
244         segments->back().fPts[1] = pts[2];
245     }
246 }
247
248 static inline void add_cubic_segments(const SkPoint pts[4],
249                                       SkPath::Direction dir,
250                                       SegmentArray* segments) {
251     SkSTArray<15, SkPoint, true> quads;
252     GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads);
253     int count = quads.count();
254     for (int q = 0; q < count; q += 3) {
255         add_quad_segment(&quads[q], segments);
256     }
257 }
258
// Walks |path| (with |m| applied), converting its verbs into a SegmentArray
// of lines and quads, and computes the fan point plus vertex/index totals.
// Returns false for degenerate (near-zero-area) paths or paths whose
// direction cannot be determined; callers draw nothing in that case.
static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPath::Direction dir;
    // get_direction can fail for some degenerate paths.
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                // Only the line's end point is new; it alone is mapped,
                // degeneracy-tested, and appended.
                m.mapPoints(&pts[1], 1);
                update_degenerate_test(&degenerateData, pts[1]);
                add_line_to_segment(pts[1], segments);
                break;
            }
            case SkPath::kQuad_Verb:
                m.mapPoints(pts, 3);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                add_quad_segment(pts, segments);
                break;
            case SkPath::kConic_Verb: {
                // Approximate the conic with quads (tolerance 0.5) and feed
                // each quad's new points through the degeneracy test.
                m.mapPoints(pts, 3);
                SkScalar weight = iter.conicWeight();
                SkAutoConicToQuads converter;
                const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
                for (int i = 0; i < converter.countQuads(); ++i) {
                    update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                    update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                    add_quad_segment(quadPts + 2*i, segments);
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                m.mapPoints(pts, 4);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                update_degenerate_test(&degenerateData, pts[3]);
                add_cubic_segments(pts, dir, segments);
                break;
            };
            case SkPath::kDone_Verb:
                // Path fully consumed: reject degenerates, otherwise finish
                // by computing normals, bisectors, and geometry counts.
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    compute_vectors(segments, fanPt, dir, vCount, iCount);
                    return true;
                }
            default:
                break;
        }
    }
}
332
// Vertex layout fed to QuadEdgeEffect: device-space position, (u,v) in the
// quad's canonical space (the edge is u^2 - v = 0; see the comment above
// QuadEdgeEffect), and two signed edge distances used to trim the infinite
// quad.
struct QuadVertex {
    SkPoint  fPos;
    SkPoint  fUV;
    SkScalar fD0;
    SkScalar fD1;
};
339
// Vertex/index counts for one indexed draw. create_vertices() starts a new
// Draw whenever adding a segment would push index values past 16 bits.
struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;
347
// Tessellates all segments into |verts|/|idxs|, recording one or more Draw
// records in |draws|. Each segment contributes a 4-vertex/6-index corner
// wedge plus either a 5-vertex/9-index line strip-quad or a 6-vertex/
// 12-index quad hull (matching the counts from compute_vectors()). The
// buffers must already be sized for the totals compute_vectors() reported.
static void create_vertices(const SegmentArray&  segments,
                            const SkPoint& fanPt,
                            DrawArray*     draws,
                            QuadVertex*    verts,
                            uint16_t*      idxs) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw; rebase the vert/index write pointers so the
            // local *v/*i counters restart at zero.
            verts += *v;
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        // Corner wedge: two triangles fanning from the shared corner point
        // out to the previous edge's end normal, the bisector, and the next
        // edge's start normal.
        // FIXME: These tris are inset in the 1 unit arc around the corner
        verts[*v + 0].fPos = sega.endPt();
        verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
        verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
        verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
        verts[*v + 0].fUV.set(0,0);
        verts[*v + 1].fUV.set(0,-SK_Scalar1);
        verts[*v + 2].fUV.set(0,-SK_Scalar1);
        verts[*v + 3].fUV.set(0,-SK_Scalar1);
        verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
        verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
        verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
        verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // Line segment: interior fan triangle plus an outset AA fringe.
            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = sega.endPt();
            verts[*v + 2].fPos = segb.fPts[0];

            verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
            verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];

            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
                                                        verts[*v + 2].fPos);
            verts[*v + 0].fUV.set(0, dist);
            verts[*v + 1].fUV.set(0, 0);
            verts[*v + 2].fUV.set(0, 0);
            verts[*v + 3].fUV.set(0, -SK_Scalar1);
            verts[*v + 4].fUV.set(0, -SK_Scalar1);

            verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
            verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
            verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
            verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
            verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;

            idxs[*i + 0] = *v + 0;
            idxs[*i + 1] = *v + 2;
            idxs[*i + 2] = *v + 1;

            idxs[*i + 3] = *v + 3;
            idxs[*i + 4] = *v + 1;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 4;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 2;

            *v += 5;
            *i += 9;
        } else {
            // Quad segment: interior fan triangle plus a 3-vertex outset
            // hull around the curve; the shader evaluates the implicit quad.
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();

            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = qpts[0];
            verts[*v + 2].fPos = qpts[2];
            verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
            verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
            verts[*v + 5].fPos = qpts[1] + midVec;

            // fD0: signed distance from the edge through qpts[0] with normal
            // fNorms[0]; the outset verts get a huge negative value so the
            // shader's trim test never rejects them.
            SkScalar c = segb.fNorms[0].dot(qpts[0]);
            verts[*v + 0].fD0 =  -segb.fNorms[0].dot(fanPt) + c;
            verts[*v + 1].fD0 =  0.f;
            verts[*v + 2].fD0 =  -segb.fNorms[0].dot(qpts[2]) + c;
            verts[*v + 3].fD0 = -SK_ScalarMax/100;
            verts[*v + 4].fD0 = -SK_ScalarMax/100;
            verts[*v + 5].fD0 = -SK_ScalarMax/100;

            // fD1: same, for the edge through qpts[2] with normal fNorms[1].
            c = segb.fNorms[1].dot(qpts[2]);
            verts[*v + 0].fD1 =  -segb.fNorms[1].dot(fanPt) + c;
            verts[*v + 1].fD1 =  -segb.fNorms[1].dot(qpts[0]) + c;
            verts[*v + 2].fD1 =  0.f;
            verts[*v + 3].fD1 = -SK_ScalarMax/100;
            verts[*v + 4].fD1 = -SK_ScalarMax/100;
            verts[*v + 5].fD1 = -SK_ScalarMax/100;

            // Map all six positions into the quad's canonical (u,v) space.
            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            idxs[*i +  9] = *v + 0;
            idxs[*i + 10] = *v + 2;
            idxs[*i + 11] = *v + 1;

            *v += 6;
            *i += 12;
        }
    }
}
494
495 ///////////////////////////////////////////////////////////////////////////////
496
497 /*
498  * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
499  * two components of the vertex attribute. Coverage is based on signed
500  * distance with negative being inside, positive outside. The edge is specified in
501  * window space (y-down). If either the third or fourth component of the interpolated
502  * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
503  * attempt to trim to a portion of the infinite quad.
504  * Requires shader derivative instruction support.
505  */
506
// Geometry processor implementing the coverage computation described in the
// comment above: position attribute plus a vec4 "quad edge" attribute
// carrying (u, v, d0, d1).
class QuadEdgeEffect : public GrGeometryProcessor {
public:

    static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix) {
        return SkNEW_ARGS(QuadEdgeEffect, (color, localMatrix));
    }

    virtual ~QuadEdgeEffect() {}

    const char* name() const SK_OVERRIDE { return "QuadEdge"; }

    // Accessors for the two vertex attributes registered in the constructor.
    const Attribute* inPosition() const { return fInPosition; }
    const Attribute* inQuadEdge() const { return fInQuadEdge; }

    // GLSL implementation: assembles the vertex/fragment shader code.
    class GLProcessor : public GrGLGeometryProcessor {
    public:
        GLProcessor(const GrGeometryProcessor&,
                    const GrBatchTracker&)
            : fColor(GrColor_ILLEGAL) {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) SK_OVERRIDE {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLGPBuilder* pb = args.fPB;
            GrGLVertexBuilder* vsBuilder = pb->getVertexShaderBuilder();

            // emit attributes
            vsBuilder->emitAttributes(qe);

            // Pass the full vec4 quad-edge attribute through to the FS.
            GrGLVertToFrag v(kVec4f_GrSLType);
            args.fPB->addVarying("QuadEdge", &v);
            vsBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);

            const BatchTracker& local = args.fBT.cast<BatchTracker>();

            // Setup pass through color
            this->setupColorPassThrough(pb, local.fInputColorType, args.fOutputColor, NULL,
                                        &fColorUniform);

            // Setup position
            this->setupPosition(pb, gpArgs, qe.inPosition()->fName, qe.viewMatrix());

            // emit transforms
            this->emitTransforms(args.fPB, gpArgs->fPositionVar, qe.inPosition()->fName,
                                 qe.localMatrix(), args.fTransformsIn, args.fTransformsOut);

            GrGLGPFragmentBuilder* fsBuilder = args.fPB->getFragmentShaderBuilder();

            // The coverage math below needs dFdx/dFdy; the renderer only
            // claims paths when the caps report derivative support.
            SkAssertResult(fsBuilder->enableFeature(
                    GrGLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
            fsBuilder->codeAppendf("float edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fsBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
            fsBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
            // If both trim distances (z, w) are positive, coverage comes from
            // the nearer trim edge instead of the implicit curve.
            fsBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fsBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
                                    v.fsIn());
            fsBuilder->codeAppendf ("} else {");
            // Gradient of f(u,v) = u^2 - v via the chain rule, then coverage
            // from the signed distance estimate f / |grad f|.
            fsBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
                                   "               2.0*%s.x*duvdy.x - duvdy.y);",
                                   v.fsIn(), v.fsIn());
            fsBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                    v.fsIn());
            fsBuilder->codeAppendf("edgeAlpha = "
                                   "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");

            fsBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
        }

        // Folds the color-input type, local-matrix perspective flag, and the
        // position key into the processor key.
        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrBatchTracker& bt,
                                  const GrGLCaps&,
                                  GrProcessorKeyBuilder* b) {
            const BatchTracker& local = bt.cast<BatchTracker>();
            uint32_t key = local.fInputColorType << 16;
            key |= local.fUsesLocalCoords && gp.localMatrix().hasPerspective() ? 0x1 : 0x0;
            key |= ComputePosKey(gp.viewMatrix()) << 1;
            b->add32(key);
        }

        // Uploads the view matrix and, for uniform-color input, the color
        // (skipping the upload when the color is unchanged).
        virtual void setData(const GrGLProgramDataManager& pdman,
                             const GrPrimitiveProcessor& gp,
                             const GrBatchTracker& bt) SK_OVERRIDE {
            this->setUniformViewMatrix(pdman, gp.viewMatrix());

            const BatchTracker& local = bt.cast<BatchTracker>();
            if (kUniform_GrGPInput == local.fInputColorType && local.fColor != fColor) {
                GrGLfloat c[4];
                GrColorToRGBAFloat(local.fColor, c);
                pdman.set4fv(fColorUniform, 1, c);
                fColor = local.fColor;
            }
        }

    private:
        GrColor fColor;               // last color uploaded to fColorUniform
        UniformHandle fColorUniform;

        typedef GrGLGeometryProcessor INHERITED;
    };

    virtual void getGLProcessorKey(const GrBatchTracker& bt,
                                   const GrGLCaps& caps,
                                   GrProcessorKeyBuilder* b) const SK_OVERRIDE {
        GLProcessor::GenKey(*this, bt, caps, b);
    }

    virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
                                                     const GrGLCaps&) const SK_OVERRIDE {
        return SkNEW_ARGS(GLProcessor, (*this, bt));
    }

    // Resolves the batch's color input type/value and caches whether local
    // coords are needed.
    void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
        BatchTracker* local = bt->cast<BatchTracker>();
        local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
        local->fUsesLocalCoords = init.fUsesLocalCoords;
    }

    bool onCanMakeEqual(const GrBatchTracker& m,
                        const GrGeometryProcessor& that,
                        const GrBatchTracker& t) const SK_OVERRIDE {
        const BatchTracker& mine = m.cast<BatchTracker>();
        const BatchTracker& theirs = t.cast<BatchTracker>();
        return CanCombineLocalMatrices(*this, mine.fUsesLocalCoords,
                                       that, theirs.fUsesLocalCoords) &&
               CanCombineOutput(mine.fInputColorType, mine.fColor,
                                theirs.fInputColorType, theirs.fColor);
    }

private:
    QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix)
        : INHERITED(color, SkMatrix::I(), localMatrix) {
        this->initClassID<QuadEdgeEffect>();
        fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
        fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
    }

    // Every instance registers identical attributes (see the constructor),
    // so any two QuadEdgeEffects compare equal here.
    bool onIsEqual(const GrGeometryProcessor& other) const SK_OVERRIDE {
        return true;
    }

    void onGetInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
        out->setUnknownSingleComponent();
    }

    // Per-batch state resolved in initBatchTracker().
    struct BatchTracker {
        GrGPInput fInputColorType;
        GrColor fColor;
        bool fUsesLocalCoords;
    };

    const Attribute* fInPosition;
    const Attribute* fInQuadEdge;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST;

    typedef GrGeometryProcessor INHERITED;
};
666
667 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
668
669 GrGeometryProcessor* QuadEdgeEffect::TestCreate(SkRandom* random,
670                                                 GrContext*,
671                                                 const GrDrawTargetCaps& caps,
672                                                 GrTexture*[]) {
673     // Doesn't work without derivative instructions.
674     return caps.shaderDerivativeSupport() ?
675            QuadEdgeEffect::Create(GrRandomColor(random),
676                                   GrProcessorUnitTest::TestMatrix(random)) : NULL;
677 }
678
679 ///////////////////////////////////////////////////////////////////////////////
680
681 bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target,
682                                          const GrPipelineBuilder*,
683                                          const SkMatrix& viewMatrix,
684                                          const SkPath& path,
685                                          const SkStrokeRec& stroke,
686                                          bool antiAlias) const {
687     return (target->caps()->shaderDerivativeSupport() && antiAlias &&
688             stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex());
689 }
690
691 class AAConvexPathBatch : public GrBatch {
692 public:
    // Per-path data carried by this batch: the draw color, the view matrix,
    // and the path to tessellate.
    struct Geometry {
        GrColor fColor;
        SkMatrix fViewMatrix;
        SkPath fPath;
    };
698
    // Factory: wraps a single Geometry in a new batch.
    static GrBatch* Create(const Geometry& geometry) {
        return SkNEW_ARGS(AAConvexPathBatch, (geometry));
    }

    const char* name() const SK_OVERRIDE { return "AAConvexBatch"; }

    void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
        // When this is called on a batch, there is only one geometry bundle
        out->setKnownFourComponents(fGeoData[0].fColor);
    }
    // Coverage is produced per-pixel by the effect's shader, so report a
    // single unknown component.
    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
        out->setUnknownSingleComponent();
    }
712
    // Applies the pipeline's color overrides to the first geometry bundle and
    // caches the pipeline properties this batch consults while generating
    // geometry.
    void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
        // Handle any color overrides
        if (init.fColorIgnored) {
            fGeoData[0].fColor = GrColor_ILLEGAL;
        } else if (GrColor_ILLEGAL != init.fOverrideColor) {
            fGeoData[0].fColor = init.fOverrideColor;
        }

        // setup batch properties
        fBatch.fColorIgnored = init.fColorIgnored;
        fBatch.fColor = fGeoData[0].fColor;
        fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
        fBatch.fCoverageIgnored = init.fCoverageIgnored;
    }
727
728     void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
729         int instanceCount = fGeoData.count();
730
731         SkMatrix invert;
732         if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
733             SkDebugf("Could not invert viewmatrix\n");
734             return;
735         }
736
737         // Setup GrGeometryProcessor
738         SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(this->color(),
739                                                                                invert));
740
741         batchTarget->initDraw(quadProcessor, pipeline);
742
743         // TODO remove this when batch is everywhere
744         GrPipelineInfo init;
745         init.fColorIgnored = fBatch.fColorIgnored;
746         init.fOverrideColor = GrColor_ILLEGAL;
747         init.fCoverageIgnored = fBatch.fCoverageIgnored;
748         init.fUsesLocalCoords = this->usesLocalCoords();
749         quadProcessor->initBatchTracker(batchTarget->currentBatchTracker(), init);
750
751         // TODO generate all segments for all paths and use one vertex buffer
752         for (int i = 0; i < instanceCount; i++) {
753             Geometry& args = fGeoData[i];
754
755             // We use the fact that SkPath::transform path does subdivision based on
756             // perspective. Otherwise, we apply the view matrix when copying to the
757             // segment representation.
758             const SkMatrix* viewMatrix = &args.fViewMatrix;
759             if (viewMatrix->hasPerspective()) {
760                 args.fPath.transform(*viewMatrix);
761                 viewMatrix = &SkMatrix::I();
762             }
763
764             int vertexCount;
765             int indexCount;
766             enum {
767                 kPreallocSegmentCnt = 512 / sizeof(Segment),
768                 kPreallocDrawCnt = 4,
769             };
770             SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
771             SkPoint fanPt;
772
773             if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount,
774                               &indexCount)) {
775                 continue;
776             }
777
778             const GrVertexBuffer* vertexBuffer;
779             int firstVertex;
780
781             size_t vertexStride = quadProcessor->getVertexStride();
782             void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
783                                                                   vertexCount,
784                                                                   &vertexBuffer,
785                                                                   &firstVertex);
786
787             if (!vertices) {
788                 SkDebugf("Could not allocate vertices\n");
789                 return;
790             }
791
792             const GrIndexBuffer* indexBuffer;
793             int firstIndex;
794
795             void *indices = batchTarget->indexPool()->makeSpace(indexCount,
796                                                                 &indexBuffer,
797                                                                 &firstIndex);
798
799             if (!indices) {
800                 SkDebugf("Could not allocate indices\n");
801                 return;
802             }
803
804             QuadVertex* verts = reinterpret_cast<QuadVertex*>(vertices);
805             uint16_t* idxs = reinterpret_cast<uint16_t*>(indices);
806
807             SkSTArray<kPreallocDrawCnt, Draw, true> draws;
808             create_vertices(segments, fanPt, &draws, verts, idxs);
809
810             GrDrawTarget::DrawInfo info;
811             info.setVertexBuffer(vertexBuffer);
812             info.setIndexBuffer(indexBuffer);
813             info.setPrimitiveType(kTriangles_GrPrimitiveType);
814             info.setStartIndex(firstIndex);
815
816             int vOffset = 0;
817             for (int i = 0; i < draws.count(); ++i) {
818                 const Draw& draw = draws[i];
819                 info.setStartVertex(vOffset + firstVertex);
820                 info.setVertexCount(draw.fVertexCnt);
821                 info.setIndexCount(draw.fIndexCnt);
822                 batchTarget->draw(info);
823                 vOffset += draw.fVertexCnt;
824             }
825         }
826     }
827
828     SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
829
830 private:
831     AAConvexPathBatch(const Geometry& geometry) {
832         this->initClassID<AAConvexPathBatch>();
833         fGeoData.push_back(geometry);
834     }
835
836     bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
837         AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
838
839         if (this->color() != that->color()) {
840             return false;
841         }
842
843         SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
844         if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
845             return false;
846         }
847
848         fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
849         return true;
850     }
851
852     GrColor color() const { return fBatch.fColor; }
853     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
854     const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
855
    // Pipeline-resolved state shared by the whole batch. fColorIgnored,
    // fCoverageIgnored, and fUsesLocalCoords are copied into the
    // GrPipelineInfo handed to the GP's initBatchTracker() in
    // generateGeometry(); fColor/fUsesLocalCoords also back the color() and
    // usesLocalCoords() accessors. NOTE(review): where fBatch itself is
    // filled in is outside this view — presumably the batch's own
    // initBatchTracker(); verify against the full class.
    struct BatchTracker {
        GrColor fColor;
        bool fUsesLocalCoords;
        bool fColorIgnored;
        bool fCoverageIgnored;
    };

    BatchTracker fBatch;
    // Per-path draw data; starts with one entry (see the constructor) and
    // grows when batches are combined in onCombineIfPossible().
    SkSTArray<1, Geometry, true> fGeoData;
865 };
866
867 bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target,
868                                         GrPipelineBuilder* pipelineBuilder,
869                                         GrColor color,
870                                         const SkMatrix& vm,
871                                         const SkPath& path,
872                                         const SkStrokeRec&,
873                                         bool antiAlias) {
874     if (path.isEmpty()) {
875         return true;
876     }
877
878     // We outset our vertices one pixel and add one more pixel for precision.
879     // TODO create tighter bounds when we start reordering.
880     SkRect devRect = path.getBounds();
881     vm.mapRect(&devRect);
882     devRect.outset(2, 2);
883
884     AAConvexPathBatch::Geometry geometry;
885     geometry.fColor = color;
886     geometry.fViewMatrix = vm;
887     geometry.fPath = path;
888
889     SkAutoTUnref<GrBatch> batch(AAConvexPathBatch::Create(geometry));
890     target->drawBatch(pipelineBuilder, batch, &devRect);
891
892     return true;
893
894 }