2 * Copyright 2011 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "GrInOrderDrawBuffer.h"
10 #include "GrBufferAllocPool.h"
11 #include "GrDrawTargetCaps.h"
12 #include "GrTextStrike.h"
14 #include "GrTemplates.h"
15 #include "GrTexture.h"
// Constructs the buffered draw target. Commands are recorded into fCmdBuffer
// and geometry is suballocated from the caller-owned vertex/index pools, then
// everything is played back against the real GPU target in flush().
17 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
18 GrVertexBufferAllocPool* vertexPool,
19 GrIndexBufferAllocPool* indexPool)
20 : INHERITED(gpu->getContext())
21 , fCmdBuffer(kCmdBufferInitialSizeInBytes)
26 , fClipProxyState(kUnknown_ClipProxyState)
27 , fVertexPool(*vertexPool)
28 , fIndexPool(*indexPool)
// Share the destination GPU's capabilities; SkRef adds the ref that fCaps owns.
33 fCaps.reset(SkRef(fDstGpu->caps()));
// Seed the geometry-pool state stack with its bottom entry.
38 GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
39 poolState.fUsedPoolVertexBytes = 0;
40 poolState.fUsedPoolIndexBytes = 0;
// ~0 looks like a poison value: these fields are only meaningful while a
// reserved geometry source is active, so accidental use should be obvious.
// NOTE(review): presumably debug-only initialization — confirm against the
// elided surrounding lines.
42 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
43 poolState.fPoolStartVertex = ~0;
44 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
45 poolState.fPoolStartIndex = ~0;
50 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
52 // This must be called before the GrDrawTarget destructor runs.
53 this->releaseGeometry();
57 ////////////////////////////////////////////////////////////////////////////////
// Computes the 2D bounding rect of a vertex array's positions. Assumes each
// vertex begins with an SkPoint position at offset 0 and that vertices are
// laid out with a fixed stride of vertexSize bytes.
60 void get_vertex_bounds(const void* vertices,
64 SkASSERT(vertexSize >= sizeof(SkPoint));
65 SkASSERT(vertexCount > 0);
// Initialize bounds from the first vertex, then grow over the rest.
66 const SkPoint* point = static_cast<const SkPoint*>(vertices);
67 bounds->fLeft = bounds->fRight = point->fX;
68 bounds->fTop = bounds->fBottom = point->fY;
69 for (int i = 1; i < vertexCount; ++i) {
// Step by the byte stride, not sizeof(SkPoint), to skip interleaved attributes.
70 point = reinterpret_cast<SkPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
71 bounds->growToInclude(point->fX, point->fY);
// Vertex layout used by onDrawRect: position (offset 0), premultiplied color
// (after position), and optional local coords (after color). Declared extern
// so it can be used as a template argument to setVertexAttribs<>.
79 extern const GrVertexAttrib kRectAttribs[] = {
80 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
81 {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
82 {kVec2f_GrVertexAttribType, sizeof(SkPoint)+sizeof(GrColor), kLocalCoord_GrVertexAttribBinding},
86 /** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we
87 have explicit local coords and sometimes not. We *could* always provide explicit local coords
88 and just duplicate the positions when the caller hasn't provided a local coord rect, but we
89 haven't seen a use case which frequently switches between local rect and no local rect draws.
91 The color param is used to determine whether the opaque hint can be set on the draw state.
92 The caller must populate the vertex colors itself.
94 The vertex attrib order is always pos, color, [local coords].
96 static void set_vertex_attributes(GrDrawState* drawState, bool hasLocalCoords, GrColor color) {
// With local coords: pos + color + local coords (3 attribs); otherwise just
// pos + color (2 attribs). The stride argument is the total vertex size.
98 drawState->setVertexAttribs<kRectAttribs>(3, 2 * sizeof(SkPoint) + sizeof(SkColor));
100 drawState->setVertexAttribs<kRectAttribs>(2, sizeof(SkPoint) + sizeof(SkColor));
// A fully-opaque color lets us set the opaque-vertex-colors hint.
102 if (0xFF == GrColorUnpackA(color)) {
103 drawState->setHint(GrDrawState::kVertexColorsAreOpaque_Hint, true);
112 static inline uint8_t add_trace_bit(uint8_t cmd) { return cmd | kTraceCmdBit; }
114 static inline uint8_t strip_trace_bit(uint8_t cmd) { return cmd & kCmdMask; }
116 static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); }
// Records a rect draw as a 4-vertex quad. Positions are pre-transformed to
// device space so consecutive rect draws can batch across view-matrix changes.
// Vertex layout comes from set_vertex_attributes (pos, color, [local coords]).
118 void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
119 const SkRect* localRect,
120 const SkMatrix* localMatrix) {
121 GrDrawState* drawState = this->drawState();
// Per-vertex colors are always used so rects batch across color changes.
123 GrColor color = drawState->getColor();
125 set_vertex_attributes(drawState, SkToBool(localRect), color);
127 AutoReleaseGeometry geo(this, 4, 0);
128 if (!geo.succeeded()) {
129 SkDebugf("Failed to get space for vertices!\n");
133 // Go to device coords to allow batching across matrix changes
134 SkMatrix matrix = drawState->getViewMatrix();
136 // When the caller has provided an explicit source rect for a stage then we don't want to
137 // modify that stage's matrix. Otherwise if the effect is generating its source rect from
138 // the vertex positions then we have to account for the view matrix change.
139 GrDrawState::AutoViewMatrixRestore avmr;
140 if (!avmr.setIdentity(drawState)) {
// Write the rect as a 4-point fan and map the positions into device space.
144 size_t vstride = drawState->getVertexStride();
146 geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vstride);
147 matrix.mapPointsWithStride(geo.positions(), vstride, 4);
150 // since we already computed the dev verts, set the bounds hint. This will help us avoid
151 // unnecessary clipping in our onDraw().
152 get_vertex_bounds(geo.vertices(), vstride, 4, &devBounds);
// Optional local coords follow position + color within each vertex.
155 static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
156 SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + kLocalOffset);
157 coords->setRectFan(localRect->fLeft, localRect->fTop,
158 localRect->fRight, localRect->fBottom,
161 localMatrix->mapPointsWithStride(coords, vstride, 4);
// Replicate the draw state's color into each of the four vertices.
165 static const int kColorOffset = sizeof(SkPoint);
166 GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + kColorOffset);
167 for (int i = 0; i < 4; ++i) {
169 vertColor = (GrColor*) ((intptr_t) vertColor + vstride);
// One quad instance: 4 vertices, 6 indices from the shared quad index buffer.
172 this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
173 this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);
175 // to ensure that stashing the drawState ptr is valid
176 SkASSERT(this->drawState() == drawState);
// Returns true when devBounds is provably contained in the current clip, which
// lets onDraw() disable clipping for that draw. Caches a rect-shaped clip in
// fClipProxy so subsequent checks are a single rect containment test.
179 bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
180 if (!this->getDrawState().isClipState()) {
// Lazily classify the clip the first time we are asked after a clip change.
183 if (kUnknown_ClipProxyState == fClipProxyState) {
186 this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
188 // The clip is a rect. We will remember that in fClipProxy. It is common for an edge (or
189 // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
190 // free via the viewport. We don't want to think that clipping must be enabled in this
191 // case. So we extend the clip outward from the edge to avoid these false negatives.
192 fClipProxyState = kValid_ClipProxyState;
193 fClipProxy = SkRect::Make(rect);
195 if (fClipProxy.fLeft <= 0) {
196 fClipProxy.fLeft = SK_ScalarMin;
198 if (fClipProxy.fTop <= 0) {
199 fClipProxy.fTop = SK_ScalarMin;
201 if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
202 fClipProxy.fRight = SK_ScalarMax;
204 if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
205 fClipProxy.fBottom = SK_ScalarMax;
208 fClipProxyState = kInvalid_ClipProxyState;
// Fast path: rect-shaped clip cached above.
211 if (kValid_ClipProxyState == fClipProxyState) {
212 return fClipProxy.contains(devBounds);
// Slow path: translate device bounds into clip space and query the clip stack.
214 SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
215 SkIntToScalar(this->getClip()->fOrigin.fY)};
216 SkRect clipSpaceBounds = devBounds;
217 clipSpaceBounds.offset(originOffset);
218 return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
// Attempts to merge an instanced draw into the previous Draw command instead
// of recording a new one. Returns the number of instances that were folded
// into the prior draw (possibly 0, possibly fewer than info.instanceCount()).
221 int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
222 SkASSERT(!fCmdBuffer.empty());
223 SkASSERT(info.isInstanced());
225 const GeometrySrcState& geomSrc = this->getGeomSrc();
226 const GrDrawState& drawState = this->getDrawState();
228 // we only attempt to concat the case when reserved verts are used with a client-specified index
229 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
231 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
232 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
235 // Check if there is a draw info that is compatible that uses the same VB from the pool and
237 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
241 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
242 GeometryPoolState& poolState = fGeoPoolStateStack.back();
243 const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;
// The previous draw must be instanced with identical per-instance geometry
// and use the same vertex and index buffers to be concatenable.
245 if (!draw->fInfo.isInstanced() ||
246 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
247 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
248 draw->vertexBuffer() != vertexBuffer ||
249 draw->indexBuffer() != geomSrc.fIndexBuffer) {
252 // info does not yet account for the offset from the start of the pool's VB while the previous
// draw's start vertex already does; the new vertices must begin exactly where
// the previous draw's vertices end.
254 int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
255 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != adjustedStartVertex) {
259 SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount());
261 // how many instances can be concat'ed onto draw given the size of the index buffer
262 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
263 instancesToConcat -= draw->fInfo.instanceCount();
264 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
266 // update the amount of reserved vertex data actually referenced in draws
267 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
268 drawState.getVertexStride();
269 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
271 draw->fInfo.adjustInstanceCount(instancesToConcat);
273 // update last fGpuCmdMarkers to include any additional trace markers that have been added
274 if (this->getActiveTraceMarkers().count() > 0) {
275 if (cmd_has_trace_marker(draw->fType)) {
276 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
278 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
279 draw->fType = add_trace_bit(draw->fType);
283 return instancesToConcat;
// RAII helper: set() disables the clip state bit on a draw state (only if it
// was enabled), and the destructor re-enables it when the scope ends.
286 class AutoClipReenable {
288 AutoClipReenable() : fDrawState(NULL) {}
289 ~AutoClipReenable() {
291 fDrawState->enableState(GrDrawState::kClip_StateBit);
294 void set(GrDrawState* drawState) {
// Only record the state (and disable clip) when clipping was actually on,
// so the destructor never enables clip on a state that never had it.
295 if (drawState->isClipState()) {
296 fDrawState = drawState;
297 drawState->disableState(GrDrawState::kClip_StateBit);
301 GrDrawState* fDrawState;
// Records a draw command. Tries to skip clipping for draws provably inside
// the clip, merges compatible instanced draws, and rewrites pool-relative
// start vertex/index values to absolute offsets within the pool buffers.
304 void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {
306 GeometryPoolState& poolState = fGeoPoolStateStack.back();
307 const GrDrawState& drawState = this->getDrawState();
308 AutoClipReenable acr;
// If the draw's device bounds are entirely inside the clip, temporarily
// disable clipping; acr restores it when this function returns.
310 if (drawState.isClipState() &&
311 info.getDevBounds() &&
312 this->quickInsideClip(*info.getDevBounds())) {
313 acr.set(this->drawState());
316 this->recordClipIfNecessary();
317 this->recordStateIfNecessary();
// Resolve the actual vertex buffer: client-specified or from the pool.
319 const GrVertexBuffer* vb;
320 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) {
321 vb = this->getGeomSrc().fVertexBuffer;
323 vb = poolState.fPoolVertexBuffer;
// Same resolution for the index buffer, only when the draw is indexed.
326 const GrIndexBuffer* ib = NULL;
327 if (info.isIndexed()) {
328 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) {
329 ib = this->getGeomSrc().fIndexBuffer;
331 ib = poolState.fPoolIndexBuffer;
// Instanced draws may be partly (or wholly) folded into the previous Draw.
336 if (info.isInstanced()) {
337 int instancesConcated = this->concatInstancedDraw(info);
338 if (info.instanceCount() > instancesConcated) {
339 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
340 draw->fInfo.adjustInstanceCount(-instancesConcated);
345 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
347 this->recordTraceMarkersIfNecessary();
349 // Adjust the starting vertex and index when we are using reserved or array sources to
350 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
351 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
352 size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
353 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
354 draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex);
357 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
358 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
359 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
360 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex);
// Records a path-stencil operation for later playback in flush().
364 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, GrPathRendering::FillType fill) {
365 this->recordClipIfNecessary();
366 // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
367 this->recordStateIfNecessary();
368 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path));
370 this->recordTraceMarkersIfNecessary();
// Records a path draw, including an optional copy of the destination used for
// dst reads (dstCopy may describe such a texture; copied into the command).
373 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
374 GrPathRendering::FillType fill,
375 const GrDeviceCoordTexture* dstCopy) {
376 this->recordClipIfNecessary();
377 // TODO: Only compare the subset of GrDrawState relevant to path covering?
378 this->recordStateIfNecessary();
379 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
382 dp->fDstCopy = *dstCopy;
384 this->recordTraceMarkersIfNecessary();
// Records a batched multi-path draw. The per-path indices and transforms are
// copied into extra storage allocated alongside the command itself.
387 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
388 const uint32_t indices[], int count,
389 const float transforms[], PathTransformType transformsType,
390 GrPathRendering::FillType fill,
391 const GrDeviceCoordTexture* dstCopy) {
394 SkASSERT(transforms);
396 this->recordClipIfNecessary();
397 this->recordStateIfNecessary();
// Size of the trailing payload: one uint32_t index per path plus
// PathTransformSize(transformsType) floats per path.
399 int sizeOfIndices = sizeof(uint32_t) * count;
400 int sizeOfTransforms = sizeof(float) * count *
401 GrPathRendering::PathTransformSize(transformsType);
403 DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange),
404 sizeOfIndices + sizeOfTransforms);
405 memcpy(dp->indices(), indices, sizeOfIndices);
407 memcpy(dp->transforms(), transforms, sizeOfTransforms);
408 dp->fTransformsType = transformsType;
411 dp->fDstCopy = *dstCopy;
414 this->recordTraceMarkersIfNecessary();
// Records a clear. A NULL rect means "clear the whole render target"; a NULL
// renderTarget falls back to the draw state's current target.
417 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color,
418 bool canIgnoreRect, GrRenderTarget* renderTarget) {
420 if (NULL == renderTarget) {
421 renderTarget = this->drawState()->getRenderTarget();
422 SkASSERT(renderTarget);
425 // We could do something smart and remove previous draws and clears to
426 // the current render target. If we get that smart we have to make sure
427 // those draws aren't read before this clear (render-to-texture).
428 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
431 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
// Clears must use premultiplied colors.
432 GrColorIsPMAssert(color);
435 clr->fCanIgnoreRect = canIgnoreRect;
436 this->recordTraceMarkersIfNecessary();
// Records a stencil-clip clear; NULL renderTarget defaults to the draw
// state's current render target.
439 void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect,
441 GrRenderTarget* renderTarget) {
442 if (NULL == renderTarget) {
443 renderTarget = this->drawState()->getRenderTarget();
444 SkASSERT(renderTarget);
446 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
448 clr->fInsideClip = insideClip;
449 this->recordTraceMarkersIfNecessary();
// Records a render-target discard. Reuses the Clear command with the
// GrColor_ILLEGAL sentinel; Clear::execute() dispatches on that value.
452 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
453 SkASSERT(renderTarget);
// No-op if the device cannot discard render targets.
454 if (!this->caps()->discardRenderTargetSupport()) {
457 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
458 clr->fColor = GrColor_ILLEGAL;
459 this->recordTraceMarkersIfNecessary();
// Drops all recorded commands and releases geometry sources. Requires the
// geometry-source stack to be balanced (only the bottom entry remains).
462 void GrInOrderDrawBuffer::reset() {
463 SkASSERT(1 == fGeoPoolStateStack.count());
464 this->resetVertexSource();
465 this->resetIndexSource();
472 fGpuCmdMarkers.reset();
// Plays back every recorded command against fDstGpu, attaching any recorded
// GPU trace markers around the commands that carry them, then resets the
// buffer. The destination GPU's clip, geometry, and draw state are preserved
// across the playback.
476 void GrInOrderDrawBuffer::flush() {
// Make sure any pending glyph uploads land before we replay draws.
481 this->getContext()->getFontCache()->updateTextures();
// Reserved sources must have been released before flushing.
483 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
484 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
486 if (fCmdBuffer.empty()) {
// Guards against re-entrant flushes; restored automatically on return.
490 GrAutoTRestore<bool> flushRestore(&fFlushing);
496 GrDrawTarget::AutoClipRestore acr(fDstGpu);
497 AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);
// Keep the destination's current draw state alive so it can be restored.
499 GrDrawState* prevDrawState = SkRef(fDstGpu->drawState());
501 CmdBuffer::Iter iter(fCmdBuffer);
503 int currCmdMarker = 0;
504 fDstGpu->saveActiveTraceMarkers();
506 while (iter.next()) {
// If this command recorded trace markers, push them for its duration.
507 GrGpuTraceMarker newMarker("", -1);
508 SkString traceString;
509 if (cmd_has_trace_marker(iter->fType)) {
510 traceString = fGpuCmdMarkers[currCmdMarker].toString();
511 newMarker.fMarker = traceString.c_str();
512 fDstGpu->addGpuTraceMarker(&newMarker);
// Sanity check: a draw-type command must be preceded by a SetState, so the
// destination GPU should not still be on the saved previous draw state.
516 SkDEBUGCODE(bool isDraw = kDraw_Cmd == strip_trace_bit(iter->fType) ||
517 kStencilPath_Cmd == strip_trace_bit(iter->fType) ||
518 kDrawPath_Cmd == strip_trace_bit(iter->fType) ||
519 kDrawPaths_Cmd == strip_trace_bit(iter->fType));
520 SkASSERT(!isDraw || fDstGpu->drawState() != prevDrawState);
522 iter->execute(fDstGpu);
524 if (cmd_has_trace_marker(iter->fType)) {
525 fDstGpu->removeGpuTraceMarker(&newMarker);
529 fDstGpu->restoreActiveTraceMarkers();
// Every recorded marker set should have been consumed exactly once.
530 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
532 fDstGpu->setDrawState(prevDrawState);
533 prevDrawState->unref();
// Replays a recorded draw: rebinds the captured vertex (and, when indexed,
// index) buffers on the target, then issues the draw.
538 void GrInOrderDrawBuffer::Draw::execute(GrClipTarget* gpu) {
539 gpu->setVertexSourceToBuffer(this->vertexBuffer());
540 if (fInfo.isIndexed()) {
541 gpu->setIndexSourceToBuffer(this->indexBuffer());
543 gpu->executeDraw(fInfo);
// Replays a recorded path-stencil operation.
546 void GrInOrderDrawBuffer::StencilPath::execute(GrClipTarget* gpu) {
547 gpu->stencilPath(this->path(), fFill);
// Replays a recorded path draw; passes the dst copy only if one was captured.
550 void GrInOrderDrawBuffer::DrawPath::execute(GrClipTarget* gpu) {
551 gpu->executeDrawPath(this->path(), fFill, fDstCopy.texture() ? &fDstCopy : NULL);
// Replays a recorded multi-path draw using the payload stored with the command.
554 void GrInOrderDrawBuffer::DrawPaths::execute(GrClipTarget* gpu) {
555 gpu->executeDrawPaths(this->pathRange(), this->indices(), fCount, this->transforms(),
556 fTransformsType, fFill, fDstCopy.texture() ? &fDstCopy : NULL);
// Installs the recorded draw state on the playback target.
559 void GrInOrderDrawBuffer::SetState::execute(GrClipTarget* gpu) {
560 gpu->setDrawState(&fState);
563 void GrInOrderDrawBuffer::SetClip::execute(GrClipTarget* gpu) {
564 // Our fClipData is referenced directly, so we must remain alive for the entire
565 // duration of the flush (after which the gpu's previous clip is restored).
566 gpu->setClip(&fClipData);
// Replays a clear. GrColor_ILLEGAL is the sentinel recorded by discard(), so
// that value dispatches to a render-target discard instead of a color clear.
569 void GrInOrderDrawBuffer::Clear::execute(GrClipTarget* gpu) {
570 if (GrColor_ILLEGAL == fColor) {
571 gpu->discard(this->renderTarget());
573 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
// Replays a recorded stencil-clip clear.
577 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrClipTarget* gpu) {
578 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
// Replays a recorded surface-to-surface copy.
581 void GrInOrderDrawBuffer::CopySurface::execute(GrClipTarget* gpu) {
582 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
// Records a surface copy if the device supports it directly; otherwise falls
// back to the base class's (draw-based) copy path.
585 bool GrInOrderDrawBuffer::copySurface(GrSurface* dst,
587 const SkIRect& srcRect,
588 const SkIPoint& dstPoint) {
589 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
590 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
591 cs->fSrcRect = srcRect;
592 cs->fDstPoint = dstPoint;
593 this->recordTraceMarkersIfNecessary();
595 } else if (GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint)) {
596 GrDrawTarget::copySurface(dst, src, srcRect, dstPoint);
// A copy is possible if either the destination GPU or the base class's
// draw-based fallback can perform it.
603 bool GrInOrderDrawBuffer::canCopySurface(GrSurface* dst,
605 const SkIRect& srcRect,
606 const SkIPoint& dstPoint) {
607 return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint) ||
608 GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint);
// Delegates dst-surface description setup to the destination GPU.
611 void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
612 fDstGpu->initCopySurfaceDstDesc(src, desc);
// Called before a reserve of vertex/index space; may flush the buffered
// commands first (via geometryHints) when doing so is safe and would free
// pool space for the upcoming reservation.
615 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
617 // We use geometryHints() to know whether to flush the draw buffer. We
618 // can't flush if we are inside an unbalanced pushGeometrySource.
619 // Moreover, flushing blows away vertex and index data that was
620 // previously reserved. So if the vertex or index data is pulled from
621 // reserved space and won't be released by this request then we can't
623 bool insideGeoPush = fGeoPoolStateStack.count() > 1;
625 bool unreleasedVertexSpace =
627 kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;
629 bool unreleasedIndexSpace =
631 kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;
633 // we don't want to finalize any reserved geom on the target since
634 // we don't know that the client has finished writing to it.
635 bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();
637 int vcount = vertexCount;
638 int icount = indexCount;
// Only flush when all safety conditions hold and the hints say it helps.
640 if (!insideGeoPush &&
641 !unreleasedVertexSpace &&
642 !unreleasedIndexSpace &&
643 !targetHasReservedGeom &&
644 this->geometryHints(&vcount, &icount)) {
// Reports how many vertices/indices fit in the pools' current buffers (via
// the in/out params) and returns whether flushing is recommended so the
// requested counts could be satisfied from a preallocated buffer.
649 bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
650 int* indexCount) const {
651 // we will recommend a flush if the data could fit in a single
652 // preallocated buffer but none are left and it can't fit
653 // in the current buffer (which may not be prealloced).
656 int32_t currIndices = fIndexPool.currentBufferIndices();
657 if (*indexCount > currIndices &&
658 (!fIndexPool.preallocatedBuffersRemaining() &&
659 *indexCount <= fIndexPool.preallocatedBufferIndices())) {
// Report back the capacity of the pool's current index buffer.
663 *indexCount = currIndices;
// Vertex capacity depends on the current draw state's vertex stride.
666 size_t vertexStride = this->getDrawState().getVertexStride();
667 int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride);
668 if (*vertexCount > currVertices &&
669 (!fVertexPool.preallocatedBuffersRemaining() &&
670 *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) {
674 *vertexCount = currVertices;
// Reserves vertexCount vertices of the given size from the vertex pool,
// recording which pool buffer and start vertex the reservation landed in.
// Returns true on success (non-NULL pointer handed back via *vertices).
679 bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
682 GeometryPoolState& poolState = fGeoPoolStateStack.back();
683 SkASSERT(vertexCount > 0);
// A fresh reservation must not have any prior usage outstanding.
685 SkASSERT(0 == poolState.fUsedPoolVertexBytes);
687 *vertices = fVertexPool.makeSpace(vertexSize,
689 &poolState.fPoolVertexBuffer,
690 &poolState.fPoolStartVertex);
691 return SkToBool(*vertices);
// Reserves indexCount 16-bit indices from the index pool, recording the pool
// buffer and start index. Returns true on success.
694 bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
695 GeometryPoolState& poolState = fGeoPoolStateStack.back();
696 SkASSERT(indexCount > 0);
// A fresh reservation must not have any prior usage outstanding.
698 SkASSERT(0 == poolState.fUsedPoolIndexBytes);
700 *indices = fIndexPool.makeSpace(indexCount,
701 &poolState.fPoolIndexBuffer,
702 &poolState.fPoolStartIndex);
703 return SkToBool(*indices);
// Returns the unused tail of the current vertex reservation to the pool and
// clears the per-reservation bookkeeping.
706 void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
707 GeometryPoolState& poolState = fGeoPoolStateStack.back();
708 const GeometrySrcState& geoSrc = this->getGeomSrc();
710 // If we get a release vertex space call then our current source should either be reserved
711 // or array (which we copied into reserved space).
712 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
714 // When the caller reserved vertex buffer space we gave it back a pointer
715 // provided by the vertex buffer pool. At each draw we tracked the largest
716 // offset into the pool's pointer that was referenced. Now we return to the
717 // pool any portion at the tail of the allocation that no draw referenced.
718 size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
719 fVertexPool.putBack(reservedVertexBytes -
720 poolState.fUsedPoolVertexBytes);
721 poolState.fUsedPoolVertexBytes = 0;
722 poolState.fPoolVertexBuffer = NULL;
723 poolState.fPoolStartVertex = 0;
// Index-space counterpart of releaseReservedVertexSpace: returns the unused
// tail of the reservation to the index pool and resets the bookkeeping.
726 void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
727 GeometryPoolState& poolState = fGeoPoolStateStack.back();
728 const GeometrySrcState& geoSrc = this->getGeomSrc();
730 // If we get a release index space call then our current source should either be reserved
731 // or array (which we copied into reserved space).
732 SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
734 // Similar to releaseReservedVertexSpace we return any unused portion at
// the tail of the allocation (indices are 16-bit).
736 size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
737 fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
738 poolState.fUsedPoolIndexBytes = 0;
739 poolState.fPoolIndexBuffer = NULL;
740 poolState.fPoolStartIndex = 0;
// Pushes a fresh pool-state entry to match a geometry-source push. Mirrors
// the initialization done in the constructor.
743 void GrInOrderDrawBuffer::geometrySourceWillPush() {
744 GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
745 poolState.fUsedPoolVertexBytes = 0;
746 poolState.fUsedPoolIndexBytes = 0;
// ~0 poison values, as in the constructor: only valid once a reserve fills
// them in. NOTE(review): presumably debug-only — confirm elided context.
748 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
749 poolState.fPoolStartVertex = ~0;
750 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
751 poolState.fPoolStartIndex = ~0;
// Pops the pool-state entry for a geometry-source pop. If the restored source
// was reserved, its full reservation is marked used (see comment below).
755 void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) {
756 SkASSERT(fGeoPoolStateStack.count() > 1);
757 fGeoPoolStateStack.pop_back();
758 GeometryPoolState& poolState = fGeoPoolStateStack.back();
759 // we have to assume that any slack we had in our vertex/index data
760 // is now unreleasable because data may have been appended later in the
// pool.
762 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) {
763 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
765 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) {
766 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
767 restoredState.fIndexCount;
// Records a SetState command when the current draw state differs from the
// last recorded one; otherwise reuses or updates the previous SetState to
// keep the command stream minimal.
771 void GrInOrderDrawBuffer::recordStateIfNecessary() {
// No previously recorded state: record the current one unconditionally.
773 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState()));
774 fLastState = &ss->fState;
775 this->convertDrawStateToPendingExec(fLastState);
776 this->recordTraceMarkersIfNecessary();
779 const GrDrawState& curr = this->getDrawState();
780 switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) {
781 case GrDrawState::kIncompatible_CombinedState:
// States can't be merged: append a new SetState command.
782 fLastState = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr))->fState;
783 this->convertDrawStateToPendingExec(fLastState);
784 this->recordTraceMarkersIfNecessary();
786 case GrDrawState::kA_CombinedState:
787 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA.
789 case GrDrawState::kB_CombinedState:
790 // prev has already been converted to pending execution. That is a one-way ticket.
791 // So here we just destruct the previous state and reinit with a new copy of curr.
792 // Note that this goes away when we move GrIODB over to taking optimized snapshots
// of draw states.
794 fLastState->~GrDrawState();
795 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr));
796 this->convertDrawStateToPendingExec(fLastState);
// Records a SetClip command only when clipping is enabled and the clip has
// changed since the last recorded one (or none was recorded yet).
801 void GrInOrderDrawBuffer::recordClipIfNecessary() {
802 if (this->getDrawState().isClipState() &&
804 (!fLastClip || *fLastClip != *this->getClip())) {
805 fLastClip = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetClip, (this->getClip()))->fClipData;
806 this->recordTraceMarkersIfNecessary();
// If trace markers are active, tags the most recently recorded command with
// the trace bit and stores a copy of the active marker set for playback.
811 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
812 SkASSERT(!fCmdBuffer.empty());
// Each command may be tagged at most once.
813 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
814 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
815 if (activeTraceMarkers.count() > 0) {
816 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType);
817 fGpuCmdMarkers.push_back(activeTraceMarkers);
// A clip change invalidates the cached rect-clip proxy used by
// quickInsideClip(); it will be recomputed lazily on next use.
821 void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
822 INHERITED::clipWillBeSet(newClipData);
824 fClipProxyState = kUnknown_ClipProxyState;