/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrPoint.h"
#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"

GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(NULL != vertexPool);
    SkASSERT(NULL != indexPool);

    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor
    this->releaseGeometry();
    fDstGpu->unref();
}

////////////////////////////////////////////////////////////////////////////////

namespace {
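// Computes the axis-aligned bounds of the positions (the leading SkPoint of each vertex) in an
// interleaved vertex array with the given per-vertex stride.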
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    SkASSERT(vertexSize >= sizeof(SkPoint));
    SkASSERT(vertexCount > 0);
    const SkPoint* point = static_cast<const SkPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        point = reinterpret_cast<SkPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}


namespace {

extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint)+sizeof(GrColor),
                                                  kLocalCoord_GrVertexAttribBinding},
};

extern const GrVertexAttrib kRectPosUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,              kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding},
};

static void set_vertex_attributes(GrDrawState* drawState,
                                  bool hasColor, bool hasUVs,
                                  int* colorOffset, int* localOffset) {
    *colorOffset = -1;
    *localOffset = -1;

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables). However, having per-vertex colors disables
    // blending optimizations because we don't know if the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If colors
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (hasColor && hasUVs) {
        *colorOffset = sizeof(SkPoint);
        *localOffset = sizeof(SkPoint) + sizeof(GrColor);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
    } else if (hasColor) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
    } else if (hasUVs) {
        *localOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<kRectPosUVAttribs>(2);
    } else {
        drawState->setVertexAttribs<kRectPosUVAttribs>(1);
    }
}

}  // namespace

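// Commands are recorded as single bytes in fCmds. The high bit marks commands that have an
// associated set of trace markers stored in fGpuCmdMarkers; the low seven bits carry the
// command id itself.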
enum {
    kTraceCmdBit = 0x80,
    kCmdMask = 0x7f,
};

static uint8_t add_trace_bit(uint8_t cmd) {
    return cmd | kTraceCmdBit;
}

static uint8_t strip_trace_bit(uint8_t cmd) {
    return cmd & kCmdMask;
}

static bool cmd_has_trace_marker(uint8_t cmd) {
    return SkToBool(cmd & kTraceCmdBit);
}

void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    set_vertex_attributes(drawState,
                   this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                   NULL != localRect,
                   &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr is valid
    SkASSERT(this->drawState() == drawState);
}

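// Returns true if devBounds is known to lie entirely inside the current clip, in which case
// per-draw clipping can be skipped. When the conservative clip bounds form a single rect, that
// rect is cached in fClipProxy (extended past render-target edges, which the viewport already
// clips) so subsequent draws only need a rect-contains test.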
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fClipProxy. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}

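// Tries to fold the instanced draw described by 'info' into the most recently recorded draw.
// Returns the number of instances that were appended to that draw (0 if the two draws are not
// compatible).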
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // We only attempt to concatenate when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != strip_trace_bit(fCmds.back())) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    if (this->getActiveTraceMarkers().count() > 0) {
        if (cmd_has_trace_marker(fCmds.back())) {
            fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
        } else {
            fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
            fCmds.back() = add_trace_bit(fCmds.back());
        }
    }

    return instancesToConcat;
}

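// RAII helper: set() disables the clip bit on a GrDrawState if it is enabled, and the destructor
// re-enables it.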
class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState*    fDrawState;
};

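// Records a draw command. Instanced draws are first offered to concatInstancedDraw(); only the
// instances that could not be folded into the previous draw get a new DrawRecord. Draws whose
// geometry came from the pools are remapped to the pool's buffer and start offset.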
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
       this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            SkFAIL("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                SkFAIL("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() {}
GrInOrderDrawBuffer::DrawPath::DrawPath() {}
GrInOrderDrawBuffer::DrawPaths::DrawPaths() {}
GrInOrderDrawBuffer::DrawPaths::~DrawPaths() {
    if (fTransforms) {
        SkDELETE_ARRAY(fTransforms);
    }
    for (int i = 0; i < fPathCount; ++i) {
        fPaths[i]->unref();
    }
    SkDELETE_ARRAY(fPaths);
}

void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
}

void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
                                     SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path covering?
    if (this->needsNewState()) {
        this->recordState();
    }
    DrawPath* cp = this->recordDrawPath();
    cp->fPath.reset(path);
    path->ref();
    cp->fFill = fill;
    if (NULL != dstCopy) {
        cp->fDstCopy = *dstCopy;
    }
}

void GrInOrderDrawBuffer::onDrawPaths(int pathCount, const GrPath** paths,
                                      const SkMatrix* transforms,
                                      SkPath::FillType fill,
                                      SkStrokeRec::Style stroke,
                                      const GrDeviceCoordTexture* dstCopy) {
    SkASSERT(pathCount);

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }
    DrawPaths* dp = this->recordDrawPaths();
    dp->fPathCount = pathCount;
    dp->fPaths = SkNEW_ARRAY(const GrPath*, pathCount);
    memcpy(dp->fPaths, paths, sizeof(GrPath*) * pathCount);
    for (int i = 0; i < pathCount; ++i) {
        dp->fPaths[i]->ref();
    }

    dp->fTransforms = SkNEW_ARRAY(SkMatrix, pathCount);
    memcpy(dp->fTransforms, transforms, sizeof(SkMatrix) * pathCount);

    dp->fFill = fill;
    dp->fStroke = stroke;

    if (NULL != dstCopy) {
        dp->fDstCopy = *dstCopy;
    }
}

void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
                                bool canIgnoreRect, GrRenderTarget* renderTarget) {
    SkIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        SkASSERT(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    GrColorIsPMAssert(color);
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

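// A discard is recorded as a Clear with GrColor_ILLEGAL as a sentinel color; flush() turns it
// back into a GrGpu::discard() call at playback time.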
void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
    if (!this->caps()->discardRenderTargetSupport()) {
        return;
    }
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        SkASSERT(NULL != renderTarget);
    }
    Clear* clr = this->recordClear();
    clr->fColor = GrColor_ILLEGAL;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

void GrInOrderDrawBuffer::reset() {
    SkASSERT(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        SkASSERT(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        SkSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fDrawPath.reset();
    fDrawPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fCopySurfaces.reset();
    fGpuCmdMarkers.reset();
    fClipSet = true;
}

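// Plays back every recorded command, in order, against fDstGpu and then resets the buffer.
// fFlushing guards against re-entrant flushes.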
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    int currState       = 0;
    int currClip        = 0;
    int currClear       = 0;
    int currDraw        = 0;
    int currStencilPath = 0;
    int currDrawPath    = 0;
    int currDrawPaths   = 0;
    int currCopySurface = 0;
    int currCmdMarker   = 0;

    for (int c = 0; c < numCmds; ++c) {
        GrGpuTraceMarker newMarker("", -1);
        if (cmd_has_trace_marker(fCmds[c])) {
            SkString traceString = fGpuCmdMarkers[currCmdMarker].toString();
            newMarker.fMarker = traceString.c_str();
            fDstGpu->addGpuTraceMarker(&newMarker);
            ++currCmdMarker;
        }
        switch (strip_trace_bit(fCmds[c])) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);
                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
                ++currStencilPath;
                break;
            }
            case kDrawPath_Cmd: {
                const DrawPath& cp = fDrawPath[currDrawPath];
                fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
                                         NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
                ++currDrawPath;
                break;
            }
            case kDrawPaths_Cmd: {
                DrawPaths& dp = fDrawPaths[currDrawPaths];
                const GrDeviceCoordTexture* dstCopy =
                    NULL != dp.fDstCopy.texture() ? &dp.fDstCopy : NULL;
                fDstGpu->executeDrawPaths(dp.fPathCount, dp.fPaths,
                                          dp.fTransforms, dp.fFill, dp.fStroke,
                                          dstCopy);
                ++currDrawPaths;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                if (GrColor_ILLEGAL == fClears[currClear].fColor) {
                    fDstGpu->discard(fClears[currClear].fRenderTarget);
                } else {
                    fDstGpu->clear(&fClears[currClear].fRect,
                                   fClears[currClear].fColor,
                                   fClears[currClear].fCanIgnoreRect,
                                   fClears[currClear].fRenderTarget);
                }
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
        if (cmd_has_trace_marker(fCmds[c])) {
            fDstGpu->removeGpuTraceMarker(&newMarker);
        }
    }
    // we should have consumed all the states, clips, etc.
    SkASSERT(fStates.count() == currState);
    SkASSERT(fClips.count() == currClip);
    SkASSERT(fClipOrigins.count() == currClip);
    SkASSERT(fClears.count() == currClear);
    SkASSERT(fDraws.count()  == currDraw);
    SkASSERT(fCopySurfaces.count() == currCopySurface);
    SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}

bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
        CopySurface* cs = this->recordCopySurface();
        cs->fDst.reset(SkRef(dst));
        cs->fSrc.reset(SkRef(src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return true;
    } else {
        return false;
    }
}

bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}

void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}

void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
                                                         int indexCount) {
    // We use geometryHints() to know whether to flush the draw buffer. We
    // can't flush if we are inside an unbalanced pushGeometrySource.
    // Moreover, flushing blows away vertex and index data that was
    // previously reserved. So if the vertex or index data is pulled from
    // reserved space and won't be released by this request then we can't
    // flush.
    bool insideGeoPush = fGeoPoolStateStack.count() > 1;

    bool unreleasedVertexSpace =
        !vertexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

    bool unreleasedIndexSpace =
        !indexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

    // we don't want to finalize any reserved geom on the target since
    // we don't know that the client has finished writing to it.
    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();

    int vcount = vertexCount;
    int icount = indexCount;

    if (!insideGeoPush &&
        !unreleasedVertexSpace &&
        !unreleasedIndexSpace &&
        !targetHasReservedGeom &&
        this->geometryHints(&vcount, &icount)) {

        this->flush();
    }
}

bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
                                        int* indexCount) const {
    // We will recommend a flush if the data could fit in a single
    // preallocated buffer but none are left and it can't fit
    // in the current buffer (which may not be preallocated).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        size_t vertexSize = this->getDrawState().getVertexSize();
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}

bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}

void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}

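// Pushes a fresh geometry pool state when the client pushes a geometry source. In debug builds
// the buffer/start fields are poisoned so that use before a reserve or set-to-array call trips
// an assert.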
void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(
                                        const GeometrySrcState& restoredState) {
    SkASSERT(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // we have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                         restoredState.fIndexCount;
    }
}

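// A state or clip command is recorded only when the current draw state or clip differs from the
// last one recorded, which keeps redundant state changes out of the command buffer.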
bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

bool GrInOrderDrawBuffer::needsNewClip() const {
    SkASSERT(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
       if (fClipSet &&
           (fClips.empty() ||
            fClips.back() != *this->getClip()->fClipStack ||
            fClipOrigins.back() != this->getClip()->fOrigin)) {
           return true;
       }
    }
    return false;
}

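// Appends a command byte, tagging it with the trace bit and recording the currently active trace
// markers when there are any.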
void GrInOrderDrawBuffer::addToCmdBuffer(uint8_t cmd) {
    SkASSERT(!cmd_has_trace_marker(cmd));
    const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
    if (activeTraceMarkers.count() > 0) {
        fCmds.push_back(add_trace_bit(cmd));
        fGpuCmdMarkers.push_back(activeTraceMarkers);
    } else {
        fCmds.push_back(cmd);
    }
}

void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back(*this->getClip()->fClipStack);
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    this->addToCmdBuffer(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    this->addToCmdBuffer(kSetState_Cmd);
}

GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    this->addToCmdBuffer(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    this->addToCmdBuffer(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
    this->addToCmdBuffer(kDrawPath_Cmd);
    return &fDrawPath.push_back();
}

GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths() {
    this->addToCmdBuffer(kDrawPaths_Cmd);
    return &fDrawPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    this->addToCmdBuffer(kClear_Cmd);
    return &fClears.push_back();
}

GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
    this->addToCmdBuffer(kCopySurface_Cmd);
    return &fCopySurfaces.push_back();
}


void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}