Update To 11.40.268.0
[platform/framework/web/crosswalk.git] / src / third_party / skia / src / gpu / GrInOrderDrawBuffer.cpp
1 /*
2  * Copyright 2011 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7
8 #include "GrInOrderDrawBuffer.h"
9
10 #include "GrBufferAllocPool.h"
11 #include "GrDrawTargetCaps.h"
12 #include "GrTextStrike.h"
13 #include "GrGpu.h"
14 #include "GrTemplates.h"
15 #include "GrTexture.h"
16
// Constructs a draw buffer that records commands and later replays them onto
// 'gpu'. The vertex/index pools back reserved geometry; they are owned by the
// caller but this buffer drives their map/unmap/reset lifecycle.
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : INHERITED(gpu->getContext())
    , fCmdBuffer(kCmdBufferInitialSizeInBytes)
    , fLastState(NULL)
    , fLastClip(NULL)
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    // Keep the target gpu and its caps alive for the buffer's lifetime.
    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(vertexPool);
    SkASSERT(indexPool);

    // Seed the pool-state stack with one entry for the base geometry source.
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    // Poison the debug-only fields so use-before-set is obvious.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}
49
GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    // Drop any recorded-but-unflushed commands.
    this->reset();
    // This must be called before the GrDrawTarget destructor runs.
    this->releaseGeometry();
    fDstGpu->unref();
}
56
57 ////////////////////////////////////////////////////////////////////////////////
58
59 namespace {
60 void get_vertex_bounds(const void* vertices,
61                        size_t vertexSize,
62                        int vertexCount,
63                        SkRect* bounds) {
64     SkASSERT(vertexSize >= sizeof(SkPoint));
65     SkASSERT(vertexCount > 0);
66     const SkPoint* point = static_cast<const SkPoint*>(vertices);
67     bounds->fLeft = bounds->fRight = point->fX;
68     bounds->fTop = bounds->fBottom = point->fY;
69     for (int i = 1; i < vertexCount; ++i) {
70         point = reinterpret_cast<SkPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
71         bounds->growToInclude(point->fX, point->fY);
72     }
73 }
74 }
75
76
namespace {

// Vertex layout used by onDrawRect: position, then color, then optional local
// coords. The byte offsets here must stay in sync with the kColorOffset and
// kLocalOffset constants computed in onDrawRect.
extern const GrVertexAttrib kRectAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint),                 kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint)+sizeof(GrColor), kLocalCoord_GrVertexAttribBinding},
};
}
85
86 /** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we
87     have explicit local coords and sometimes not. We *could* always provide explicit local coords
88     and just duplicate the positions when the caller hasn't provided a local coord rect, but we
89     haven't seen a use case which frequently switches between local rect and no local rect draws.
90
91     The color param is used to determine whether the opaque hint can be set on the draw state.
92     The caller must populate the vertex colors itself.
93
94     The vertex attrib order is always pos, color, [local coords].
95  */
96 static void set_vertex_attributes(GrDrawState* drawState, bool hasLocalCoords, GrColor color) {
97     if (hasLocalCoords) {
98         drawState->setVertexAttribs<kRectAttribs>(3, 2 * sizeof(SkPoint) + sizeof(SkColor));
99     } else {
100         drawState->setVertexAttribs<kRectAttribs>(2, sizeof(SkPoint) + sizeof(SkColor));
101     }
102     if (0xFF == GrColorUnpackA(color)) {
103         drawState->setHint(GrDrawState::kVertexColorsAreOpaque_Hint, true);
104     }
105 }
106
// The high bit of a recorded command byte flags that the command has an
// associated entry in fGpuCmdMarkers; the low seven bits hold the command id.
enum {
    kTraceCmdBit = 0x80,
    kCmdMask = 0x7f,
};

// Sets the trace-marker flag on a command byte.
static inline uint8_t add_trace_bit(uint8_t cmd) {
    return static_cast<uint8_t>(cmd | kTraceCmdBit);
}

// Clears the trace-marker flag, leaving just the command id.
static inline uint8_t strip_trace_bit(uint8_t cmd) {
    return static_cast<uint8_t>(cmd & kCmdMask);
}
115
// Returns true if the command byte carries the trace-marker flag.
static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); }
117
118 void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
119                                      const SkRect* localRect,
120                                      const SkMatrix* localMatrix) {
121     GrDrawState* drawState = this->drawState();
122
123     GrColor color = drawState->getColor();
124
125     set_vertex_attributes(drawState, SkToBool(localRect),  color);
126
127     AutoReleaseGeometry geo(this, 4, 0);
128     if (!geo.succeeded()) {
129         SkDebugf("Failed to get space for vertices!\n");
130         return;
131     }
132
133     // Go to device coords to allow batching across matrix changes
134     SkMatrix matrix = drawState->getViewMatrix();
135
136     // When the caller has provided an explicit source rect for a stage then we don't want to
137     // modify that stage's matrix. Otherwise if the effect is generating its source rect from
138     // the vertex positions then we have to account for the view matrix change.
139     GrDrawState::AutoViewMatrixRestore avmr;
140     if (!avmr.setIdentity(drawState)) {
141         return;
142     }
143
144     size_t vstride = drawState->getVertexStride();
145
146     geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vstride);
147     matrix.mapPointsWithStride(geo.positions(), vstride, 4);
148
149     SkRect devBounds;
150     // since we already computed the dev verts, set the bounds hint. This will help us avoid
151     // unnecessary clipping in our onDraw().
152     get_vertex_bounds(geo.vertices(), vstride, 4, &devBounds);
153
154     if (localRect) {
155         static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
156         SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + kLocalOffset);
157         coords->setRectFan(localRect->fLeft, localRect->fTop,
158                            localRect->fRight, localRect->fBottom,
159                            vstride);
160         if (localMatrix) {
161             localMatrix->mapPointsWithStride(coords, vstride, 4);
162         }
163     }
164
165     static const int kColorOffset = sizeof(SkPoint);
166     GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + kColorOffset);
167     for (int i = 0; i < 4; ++i) {
168         *vertColor = color;
169         vertColor = (GrColor*) ((intptr_t) vertColor + vstride);
170     }
171
172     this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
173     this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);
174
175     // to ensure that stashing the drawState ptr is valid
176     SkASSERT(this->drawState() == drawState);
177 }
178
179 bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
180     if (!this->getDrawState().isClipState()) {
181         return true;
182     }
183     if (kUnknown_ClipProxyState == fClipProxyState) {
184         SkIRect rect;
185         bool iior;
186         this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
187         if (iior) {
188             // The clip is a rect. We will remember that in fProxyClip. It is common for an edge (or
189             // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
190             // free via the viewport. We don't want to think that clipping must be enabled in this
191             // case. So we extend the clip outward from the edge to avoid these false negatives.
192             fClipProxyState = kValid_ClipProxyState;
193             fClipProxy = SkRect::Make(rect);
194
195             if (fClipProxy.fLeft <= 0) {
196                 fClipProxy.fLeft = SK_ScalarMin;
197             }
198             if (fClipProxy.fTop <= 0) {
199                 fClipProxy.fTop = SK_ScalarMin;
200             }
201             if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
202                 fClipProxy.fRight = SK_ScalarMax;
203             }
204             if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
205                 fClipProxy.fBottom = SK_ScalarMax;
206             }
207         } else {
208             fClipProxyState = kInvalid_ClipProxyState;
209         }
210     }
211     if (kValid_ClipProxyState == fClipProxyState) {
212         return fClipProxy.contains(devBounds);
213     }
214     SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
215                             SkIntToScalar(this->getClip()->fOrigin.fY)};
216     SkRect clipSpaceBounds = devBounds;
217     clipSpaceBounds.offset(originOffset);
218     return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
219 }
220
// Attempts to fold instances of 'info' into the most recently recorded Draw
// command. Returns the number of instances successfully concatenated (0 if the
// previous command is not a compatible instanced draw).
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(!fCmdBuffer.empty());
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // we only attempt to concat the case when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
        return 0;
    }

    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    // The two draws must share instance geometry and the same buffers to be merged.
    if (!draw->fInfo.isInstanced() ||
        draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
        draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
        draw->vertexBuffer() != vertexBuffer ||
        draw->indexBuffer() != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    // The new vertices must begin exactly where the previous draw's vertices end.
    if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != adjustedStartVertex) {
        return 0;
    }

    SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->fInfo.instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexStride();
    poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->fInfo.adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    if (this->getActiveTraceMarkers().count() > 0) {
        if (cmd_has_trace_marker(draw->fType)) {
            fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
        } else {
            fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
            draw->fType = add_trace_bit(draw->fType);
        }
    }

    return instancesToConcat;
}
285
286 class AutoClipReenable {
287 public:
288     AutoClipReenable() : fDrawState(NULL) {}
289     ~AutoClipReenable() {
290         if (fDrawState) {
291             fDrawState->enableState(GrDrawState::kClip_StateBit);
292         }
293     }
294     void set(GrDrawState* drawState) {
295         if (drawState->isClipState()) {
296             fDrawState = drawState;
297             drawState->disableState(GrDrawState::kClip_StateBit);
298         }
299     }
300 private:
301     GrDrawState*    fDrawState;
302 };
303
// Records a draw command, first attempting to merge it into the previous
// instanced draw, and rebases pool-relative vertex/index offsets.
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    // If the draw's device bounds provably lie inside the clip, temporarily
    // disable clipping; acr re-enables it when this function returns.
    if (drawState.isClipState() &&
        info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    this->recordClipIfNecessary();
    this->recordStateIfNecessary();

    // Resolve the actual vertex buffer: client-supplied or from the pool.
    const GrVertexBuffer* vb;
    if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) {
        vb = this->getGeomSrc().fVertexBuffer;
    } else {
        vb = poolState.fPoolVertexBuffer;
    }

    const GrIndexBuffer* ib = NULL;
    if (info.isIndexed()) {
        if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) {
            ib = this->getGeomSrc().fIndexBuffer;
        } else {
            ib = poolState.fPoolIndexBuffer;
        }
    }

    Draw* draw;
    if (info.isInstanced()) {
        // Try to fold instances into the previous compatible instanced draw.
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
            draw->fInfo.adjustInstanceCount(-instancesConcated);
        } else {
            // Everything was concatenated; no new command needed.
            return;
        }
    } else {
        draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
    }
    this->recordTraceMarkersIfNecessary();

    // Adjust the starting vertex and index when we are using reserved or array sources to
    // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
    if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
        size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
        poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
        draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex);
    }

    if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
        size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
        poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
        draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex);
    }
}
363
364 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, GrPathRendering::FillType fill) {
365     this->recordClipIfNecessary();
366     // Only compare the subset of GrDrawState relevant to path stenciling?
367     this->recordStateIfNecessary();
368     StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path));
369     sp->fFill = fill;
370     this->recordTraceMarkersIfNecessary();
371 }
372
373 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
374                                      GrPathRendering::FillType fill,
375                                      const GrDeviceCoordTexture* dstCopy) {
376     this->recordClipIfNecessary();
377     // TODO: Only compare the subset of GrDrawState relevant to path covering?
378     this->recordStateIfNecessary();
379     DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
380     dp->fFill = fill;
381     if (dstCopy) {
382         dp->fDstCopy = *dstCopy;
383     }
384     this->recordTraceMarkersIfNecessary();
385 }
386
387 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
388                                       const uint32_t indices[], int count,
389                                       const float transforms[], PathTransformType transformsType,
390                                       GrPathRendering::FillType fill,
391                                       const GrDeviceCoordTexture* dstCopy) {
392     SkASSERT(pathRange);
393     SkASSERT(indices);
394     SkASSERT(transforms);
395
396     this->recordClipIfNecessary();
397     this->recordStateIfNecessary();
398
399     int sizeOfIndices = sizeof(uint32_t) * count;
400     int sizeOfTransforms = sizeof(float) * count *
401                            GrPathRendering::PathTransformSize(transformsType);
402
403     DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange),
404                                                        sizeOfIndices + sizeOfTransforms);
405     memcpy(dp->indices(), indices, sizeOfIndices);
406     dp->fCount = count;
407     memcpy(dp->transforms(), transforms, sizeOfTransforms);
408     dp->fTransformsType = transformsType;
409     dp->fFill = fill;
410     if (dstCopy) {
411         dp->fDstCopy = *dstCopy;
412     }
413
414     this->recordTraceMarkersIfNecessary();
415 }
416
417 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color,
418                                   bool canIgnoreRect, GrRenderTarget* renderTarget) {
419     SkIRect r;
420     if (NULL == renderTarget) {
421         renderTarget = this->drawState()->getRenderTarget();
422         SkASSERT(renderTarget);
423     }
424     if (NULL == rect) {
425         // We could do something smart and remove previous draws and clears to
426         // the current render target. If we get that smart we have to make sure
427         // those draws aren't read before this clear (render-to-texture).
428         r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
429         rect = &r;
430     }
431     Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
432     GrColorIsPMAssert(color);
433     clr->fColor = color;
434     clr->fRect = *rect;
435     clr->fCanIgnoreRect = canIgnoreRect;
436     this->recordTraceMarkersIfNecessary();
437 }
438
439 void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect,
440                                            bool insideClip,
441                                            GrRenderTarget* renderTarget) {
442     if (NULL == renderTarget) {
443         renderTarget = this->drawState()->getRenderTarget();
444         SkASSERT(renderTarget);
445     }
446     ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
447     clr->fRect = rect;
448     clr->fInsideClip = insideClip;
449     this->recordTraceMarkersIfNecessary();
450 }
451
452 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
453     SkASSERT(renderTarget);
454     if (!this->caps()->discardRenderTargetSupport()) {
455         return;
456     }
457     Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
458     clr->fColor = GrColor_ILLEGAL;
459     this->recordTraceMarkersIfNecessary();
460 }
461
462 void GrInOrderDrawBuffer::reset() {
463     SkASSERT(1 == fGeoPoolStateStack.count());
464     this->resetVertexSource();
465     this->resetIndexSource();
466
467     fCmdBuffer.reset();
468     fLastState = NULL;
469     fLastClip = NULL;
470     fVertexPool.reset();
471     fIndexPool.reset();
472     fGpuCmdMarkers.reset();
473     fClipSet = true;
474 }
475
// Replays every recorded command onto fDstGpu, then resets this buffer.
void GrInOrderDrawBuffer::flush() {
    // Guard against reentrant flushes (e.g. triggered while updating textures).
    if (fFlushing) {
        return;
    }

    // Bring the font atlas textures up to date before any draws replay.
    this->getContext()->getFontCache()->updateTextures();

    // Reserved geometry must have been released back to the pools by now.
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    if (fCmdBuffer.empty()) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    // Unmap the pools so their buffers can be bound for drawing.
    fVertexPool.unmap();
    fIndexPool.unmap();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    // Stash the gpu's current draw state; restored after playback.
    GrDrawState* prevDrawState = SkRef(fDstGpu->drawState());

    CmdBuffer::Iter iter(fCmdBuffer);

    int currCmdMarker = 0;
    fDstGpu->saveActiveTraceMarkers();

    while (iter.next()) {
        GrGpuTraceMarker newMarker("", -1);
        SkString traceString;
        if (cmd_has_trace_marker(iter->fType)) {
            // Re-activate the markers that were live when this command was
            // recorded; traceString must outlive the execute() call below.
            traceString = fGpuCmdMarkers[currCmdMarker].toString();
            newMarker.fMarker = traceString.c_str();
            fDstGpu->addGpuTraceMarker(&newMarker);
            ++currCmdMarker;
        }

        // Debug check: every draw-type command must have been preceded by a
        // SetState, so the gpu's state should differ from the stashed one.
        SkDEBUGCODE(bool isDraw = kDraw_Cmd == strip_trace_bit(iter->fType) ||
                                  kStencilPath_Cmd == strip_trace_bit(iter->fType) ||
                                  kDrawPath_Cmd == strip_trace_bit(iter->fType) ||
                                  kDrawPaths_Cmd == strip_trace_bit(iter->fType));
        SkASSERT(!isDraw || fDstGpu->drawState() != prevDrawState);

        iter->execute(fDstGpu);

        if (cmd_has_trace_marker(iter->fType)) {
            fDstGpu->removeGpuTraceMarker(&newMarker);
        }
    }

    fDstGpu->restoreActiveTraceMarkers();
    SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}
537
// Replays a recorded draw: binds the captured buffers, then issues the draw.
void GrInOrderDrawBuffer::Draw::execute(GrClipTarget* gpu) {
    gpu->setVertexSourceToBuffer(this->vertexBuffer());
    if (fInfo.isIndexed()) {
        gpu->setIndexSourceToBuffer(this->indexBuffer());
    }
    gpu->executeDraw(fInfo);
}
545
// Replays a recorded stencil-path command.
void GrInOrderDrawBuffer::StencilPath::execute(GrClipTarget* gpu) {
    gpu->stencilPath(this->path(), fFill);
}
549
// Replays a recorded path draw; passes the dst copy only if one was captured.
void GrInOrderDrawBuffer::DrawPath::execute(GrClipTarget* gpu) {
    gpu->executeDrawPath(this->path(), fFill, fDstCopy.texture() ? &fDstCopy : NULL);
}
553
// Replays a recorded multi-path draw using the inlined index/transform data.
void GrInOrderDrawBuffer::DrawPaths::execute(GrClipTarget* gpu) {
    gpu->executeDrawPaths(this->pathRange(), this->indices(), fCount, this->transforms(),
                          fTransformsType, fFill, fDstCopy.texture() ? &fDstCopy : NULL);
}
558
// Installs the recorded draw state on the gpu.
void GrInOrderDrawBuffer::SetState::execute(GrClipTarget* gpu) {
    gpu->setDrawState(&fState);
}
562
void GrInOrderDrawBuffer::SetClip::execute(GrClipTarget* gpu) {
    // Our fClipData is referenced directly, so we must remain alive for the entire
    // duration of the flush (after which the gpu's previous clip is restored).
    gpu->setClip(&fClipData);
}
568
569 void GrInOrderDrawBuffer::Clear::execute(GrClipTarget* gpu) {
570     if (GrColor_ILLEGAL == fColor) {
571         gpu->discard(this->renderTarget());
572     } else {
573         gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
574     }
575 }
576
577 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrClipTarget* gpu) {
578         gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
579 }
580
// Replays a recorded surface-to-surface copy.
void GrInOrderDrawBuffer::CopySurface::execute(GrClipTarget* gpu) {
    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
584
585 bool GrInOrderDrawBuffer::copySurface(GrSurface* dst,
586                                       GrSurface* src,
587                                       const SkIRect& srcRect,
588                                       const SkIPoint& dstPoint) {
589     if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
590         CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
591         cs->fSrcRect = srcRect;
592         cs->fDstPoint = dstPoint;
593         this->recordTraceMarkersIfNecessary();
594         return true;
595     } else if (GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint)) {
596         GrDrawTarget::copySurface(dst, src, srcRect, dstPoint);
597         return true;
598     } else {
599         return false;
600     }
601 }
602
603 bool GrInOrderDrawBuffer::canCopySurface(GrSurface* dst,
604                                          GrSurface* src,
605                                          const SkIRect& srcRect,
606                                          const SkIPoint& dstPoint) {
607     return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint) ||
608            GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint);
609 }
610
// Delegates dst-surface descriptor setup to the target gpu.
void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}
614
615 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
616                                                          int indexCount) {
617     // We use geometryHints() to know whether to flush the draw buffer. We
618     // can't flush if we are inside an unbalanced pushGeometrySource.
619     // Moreover, flushing blows away vertex and index data that was
620     // previously reserved. So if the vertex or index data is pulled from
621     // reserved space and won't be released by this request then we can't
622     // flush.
623     bool insideGeoPush = fGeoPoolStateStack.count() > 1;
624
625     bool unreleasedVertexSpace =
626         !vertexCount &&
627         kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;
628
629     bool unreleasedIndexSpace =
630         !indexCount &&
631         kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;
632
633     // we don't want to finalize any reserved geom on the target since
634     // we don't know that the client has finished writing to it.
635     bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();
636
637     int vcount = vertexCount;
638     int icount = indexCount;
639
640     if (!insideGeoPush &&
641         !unreleasedVertexSpace &&
642         !unreleasedIndexSpace &&
643         !targetHasReservedGeom &&
644         this->geometryHints(&vcount, &icount)) {
645         this->flush();
646     }
647 }
648
649 bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
650                                         int* indexCount) const {
651     // we will recommend a flush if the data could fit in a single
652     // preallocated buffer but none are left and it can't fit
653     // in the current buffer (which may not be prealloced).
654     bool flush = false;
655     if (indexCount) {
656         int32_t currIndices = fIndexPool.currentBufferIndices();
657         if (*indexCount > currIndices &&
658             (!fIndexPool.preallocatedBuffersRemaining() &&
659              *indexCount <= fIndexPool.preallocatedBufferIndices())) {
660
661             flush = true;
662         }
663         *indexCount = currIndices;
664     }
665     if (vertexCount) {
666         size_t vertexStride = this->getDrawState().getVertexStride();
667         int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride);
668         if (*vertexCount > currVertices &&
669             (!fVertexPool.preallocatedBuffersRemaining() &&
670              *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) {
671
672             flush = true;
673         }
674         *vertexCount = currVertices;
675     }
676     return flush;
677 }
678
679 bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
680                                                int vertexCount,
681                                                void** vertices) {
682     GeometryPoolState& poolState = fGeoPoolStateStack.back();
683     SkASSERT(vertexCount > 0);
684     SkASSERT(vertices);
685     SkASSERT(0 == poolState.fUsedPoolVertexBytes);
686
687     *vertices = fVertexPool.makeSpace(vertexSize,
688                                       vertexCount,
689                                       &poolState.fPoolVertexBuffer,
690                                       &poolState.fPoolStartVertex);
691     return SkToBool(*vertices);
692 }
693
694 bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
695     GeometryPoolState& poolState = fGeoPoolStateStack.back();
696     SkASSERT(indexCount > 0);
697     SkASSERT(indices);
698     SkASSERT(0 == poolState.fUsedPoolIndexBytes);
699
700     *indices = fIndexPool.makeSpace(indexCount,
701                                     &poolState.fPoolIndexBuffer,
702                                     &poolState.fPoolStartIndex);
703     return SkToBool(*indices);
704 }
705
706 void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
707     GeometryPoolState& poolState = fGeoPoolStateStack.back();
708     const GeometrySrcState& geoSrc = this->getGeomSrc();
709
710     // If we get a release vertex space call then our current source should either be reserved
711     // or array (which we copied into reserved space).
712     SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
713
714     // When the caller reserved vertex buffer space we gave it back a pointer
715     // provided by the vertex buffer pool. At each draw we tracked the largest
716     // offset into the pool's pointer that was referenced. Now we return to the
717     // pool any portion at the tail of the allocation that no draw referenced.
718     size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
719     fVertexPool.putBack(reservedVertexBytes -
720                         poolState.fUsedPoolVertexBytes);
721     poolState.fUsedPoolVertexBytes = 0;
722     poolState.fPoolVertexBuffer = NULL;
723     poolState.fPoolStartVertex = 0;
724 }
725
726 void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
727     GeometryPoolState& poolState = fGeoPoolStateStack.back();
728     const GeometrySrcState& geoSrc = this->getGeomSrc();
729
730     // If we get a release index space call then our current source should either be reserved
731     // or array (which we copied into reserved space).
732     SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
733
734     // Similar to releaseReservedVertexSpace we return any unused portion at
735     // the tail
736     size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
737     fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
738     poolState.fUsedPoolIndexBytes = 0;
739     poolState.fPoolIndexBuffer = NULL;
740     poolState.fPoolStartIndex = 0;
741 }
742
743 void GrInOrderDrawBuffer::geometrySourceWillPush() {
744     GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
745     poolState.fUsedPoolVertexBytes = 0;
746     poolState.fUsedPoolIndexBytes = 0;
747 #ifdef SK_DEBUG
748     poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
749     poolState.fPoolStartVertex = ~0;
750     poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
751     poolState.fPoolStartIndex = ~0;
752 #endif
753 }
754
755 void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) {
756     SkASSERT(fGeoPoolStateStack.count() > 1);
757     fGeoPoolStateStack.pop_back();
758     GeometryPoolState& poolState = fGeoPoolStateStack.back();
759     // we have to assume that any slack we had in our vertex/index data
760     // is now unreleasable because data may have been appended later in the
761     // pool.
762     if (kReserved_GeometrySrcType == restoredState.fVertexSrc) {
763         poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
764     }
765     if (kReserved_GeometrySrcType == restoredState.fIndexSrc) {
766         poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
767                                          restoredState.fIndexCount;
768     }
769 }
770
// Appends a SetState command when the current draw state cannot be represented
// by (or merged into) the previously recorded one.
void GrInOrderDrawBuffer::recordStateIfNecessary() {
    if (!fLastState) {
        SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState()));
        fLastState = &ss->fState;
        this->convertDrawStateToPendingExec(fLastState);
        this->recordTraceMarkersIfNecessary();
        return;
    }
    const GrDrawState& curr = this->getDrawState();
    switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) {
        case GrDrawState::kIncompatible_CombinedState:
            // Cannot be merged: record a fresh SetState command.
            fLastState = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr))->fState;
            this->convertDrawStateToPendingExec(fLastState);
            this->recordTraceMarkersIfNecessary();
            break;
        case GrDrawState::kA_CombinedState:
        case GrDrawState::kAOrB_CombinedState: // Treat the same as kA.
            // The previously recorded state already covers this draw.
            break;
        case GrDrawState::kB_CombinedState:
            // prev has already been converted to pending execution. That is a one-way ticket.
            // So here we just destruct the previous state and reinit with a new copy of curr.
            // Note that this goes away when we move GrIODB over to taking optimized snapshots
            // of draw states.
            fLastState->~GrDrawState();
            SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr));
            this->convertDrawStateToPendingExec(fLastState);
            break;
    }
}
800
801 void GrInOrderDrawBuffer::recordClipIfNecessary() {
802     if (this->getDrawState().isClipState() &&
803         fClipSet &&
804         (!fLastClip || *fLastClip != *this->getClip())) {
805         fLastClip = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetClip, (this->getClip()))->fClipData;
806         this->recordTraceMarkersIfNecessary();
807         fClipSet = false;
808     }
809 }
810
811 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
812     SkASSERT(!fCmdBuffer.empty());
813     SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
814     const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
815     if (activeTraceMarkers.count() > 0) {
816         fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType);
817         fGpuCmdMarkers.push_back(activeTraceMarkers);
818     }
819 }
820
void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    // Mark the clip dirty so the next draw records a SetClip command, and
    // invalidate the cached rect proxy used by quickInsideClip().
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}