/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
8 #include "GrInOrderDrawBuffer.h"
10 #include "GrBufferAllocPool.h"
11 #include "GrDrawTargetCaps.h"
13 #include "GrIndexBuffer.h"
16 #include "GrRenderTarget.h"
17 #include "GrTemplates.h"
18 #include "GrTexture.h"
19 #include "GrVertexBuffer.h"
// Constructor: wires the draw buffer to its target GPU's vertex/index alloc
// pools and seeds the geometry-pool state stack with one empty entry.
// NOTE(review): this capture is truncated — the body's opening brace and some
// initializer-list entries are missing, and each line carries a stale leading
// line number; comments annotate only what is visible.
21 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
22 GrVertexBufferAllocPool* vertexPool,
23 GrIndexBufferAllocPool* indexPool)
24 : GrDrawTarget(gpu->getContext())
27 , fClipProxyState(kUnknown_ClipProxyState)
// The pools are held by reference; the pointers are asserted non-NULL below.
28 , fVertexPool(*vertexPool)
29 , fIndexPool(*indexPool)
// Mirror the destination GPU's capabilities on this deferred target.
34 fCaps.reset(SkRef(fDstGpu->caps()));
36 SkASSERT(NULL != vertexPool);
37 SkASSERT(NULL != indexPool);
// Bottom-of-stack pool state: nothing used yet.
39 GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
40 poolState.fUsedPoolVertexBytes = 0;
41 poolState.fUsedPoolIndexBytes = 0;
// Poison values (~0) so accidental use before a real reservation is obvious
// in a debugger; presumably guarded by debug-only asserts elsewhere.
43 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
44 poolState.fPoolStartVertex = ~0;
45 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
46 poolState.fPoolStartIndex = ~0;
// Destructor: releases reserved geometry before base-class teardown.
51 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
53 // This must be called by before the GrDrawTarget destructor
54 this->releaseGeometry();
58 ////////////////////////////////////////////////////////////////////////////////
// Computes the axis-aligned bounds of the position (first SkPoint) of each
// vertex in a strided vertex array.
// NOTE(review): the remaining parameters (vertex size/count, out-bounds) are
// missing from this capture of the signature — confirm against the original.
61 void get_vertex_bounds(const void* vertices,
65 SkASSERT(vertexSize >= sizeof(SkPoint));
66 SkASSERT(vertexCount > 0);
// The position is assumed to be the first attribute of every vertex.
67 const SkPoint* point = static_cast<const SkPoint*>(vertices);
68 bounds->fLeft = bounds->fRight = point->fX;
69 bounds->fTop = bounds->fBottom = point->fY;
70 for (int i = 1; i < vertexCount; ++i) {
// Step by the full vertex stride, not sizeof(SkPoint).
71 point = reinterpret_cast<SkPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
72 bounds->growToInclude(point->fX, point->fY);
// Vertex attribute layouts for rect drawing. The offsets are cumulative:
// position, then (optionally) color, then local UVs.
// NOTE(review): closing braces of both arrays are missing from this capture.
80 extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
81 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
82 {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
83 {kVec2f_GrVertexAttribType, sizeof(SkPoint)+sizeof(GrColor),
84 kLocalCoord_GrVertexAttribBinding},
// Layout without a per-vertex color: position then local UVs.
87 extern const GrVertexAttrib kRectPosUVAttribs[] = {
88 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
89 {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding},
// Selects one of the kRectPos* layouts above based on whether per-vertex
// color and/or local coords are needed, and reports the byte offsets of the
// color and local-coord attributes (offsets for absent attributes are set
// elsewhere — presumably to -1; the initialization lines are missing here).
92 static void set_vertex_attributes(GrDrawState* drawState,
93 bool hasColor, bool hasUVs,
94 int* colorOffset, int* localOffset) {
98 // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
99 // only in color is a common occurrence in tables). However, having per-vertex colors disables
100 // blending optimizations because we don't know if the color will be solid or not. These
101 // optimizations help determine whether coverage and color can be blended correctly when
102 // dual-source blending isn't available. This comes into play when there is coverage. If colors
103 // were a stage it could take a hint that every vertex's color will be opaque.
104 if (hasColor && hasUVs) {
105 *colorOffset = sizeof(SkPoint);
106 *localOffset = sizeof(SkPoint) + sizeof(GrColor);
107 drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
108 } else if (hasColor) {
109 *colorOffset = sizeof(SkPoint);
// Only the first two attribs of the pos+color+UV layout: pos, color.
110 drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
// NOTE(review): the else-if for the hasUVs-only case is missing from this
// capture; the next two lines are its body (pos + local coords).
112 *localOffset = sizeof(SkPoint);
113 drawState->setVertexAttribs<kRectPosUVAttribs>(2);
// Final else: position only.
115 drawState->setVertexAttribs<kRectPosUVAttribs>(1);
// Helpers that pack a "has trace marker" flag into the high bit(s) of a
// command byte; the low bits (kCmdMask) hold the command id.
126 static uint8_t add_trace_bit(uint8_t cmd) {
127 return cmd | kTraceCmdBit;
// Returns the bare command id with the trace flag removed.
130 static uint8_t strip_trace_bit(uint8_t cmd) {
131 return cmd & kCmdMask;
// True if the command byte carries an associated GPU trace marker set.
134 static bool cmd_has_trace_marker(uint8_t cmd) {
135 return SkToBool(cmd & kTraceCmdBit);
// Records a rect draw as a 4-vertex fan with shared quad index buffer.
// Vertices are pre-transformed into device space so consecutive rect draws
// can be concatenated regardless of view-matrix changes.
// NOTE(review): several lines are missing from this capture (e.g. the
// devBounds declaration, early returns, color writes) — annotated below.
138 void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
139 const SkMatrix* matrix,
140 const SkRect* localRect,
141 const SkMatrix* localMatrix) {
142 GrDrawState::AutoColorRestore acr;
144 GrDrawState* drawState = this->drawState();
146 GrColor color = drawState->getColor();
148 int colorOffset, localOffset;
// Per-vertex color is used only when blending can cope with it (dual-source
// blending available or coverage is solid).
149 set_vertex_attributes(drawState,
150 this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
152 &colorOffset, &localOffset);
153 if (colorOffset >= 0) {
154 // We set the draw state's color to white here. This is done so that any batching performed
155 // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
156 // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
157 // constant color in its op== when the kColor layout bit is set and then we can remove
159 acr.set(drawState, 0xFFFFFFFF);
// Reserve space for 4 vertices, 0 indices (quad index buffer used instead).
162 AutoReleaseGeometry geo(this, 4, 0);
163 if (!geo.succeeded()) {
164 GrPrintf("Failed to get space for vertices!\n");
168 // Go to device coords to allow batching across matrix changes
169 SkMatrix combinedMatrix;
170 if (NULL != matrix) {
171 combinedMatrix = *matrix;
// else branch (capture is missing the "} else {" line):
173 combinedMatrix.reset();
175 combinedMatrix.postConcat(drawState->getViewMatrix());
176 // When the caller has provided an explicit source rect for a stage then we don't want to
177 // modify that stage's matrix. Otherwise if the effect is generating its source rect from
178 // the vertex positions then we have to account for the view matrix change.
179 GrDrawState::AutoViewMatrixRestore avmr;
180 if (!avmr.setIdentity(drawState)) {
184 size_t vsize = drawState->getVertexSize();
186 geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
187 combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);
190 // since we already computed the dev verts, set the bounds hint. This will help us avoid
191 // unnecessary clipping in our onDraw().
192 get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);
194 if (localOffset >= 0) {
195 SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
196 coords->setRectFan(localRect->fLeft, localRect->fTop,
197 localRect->fRight, localRect->fBottom,
199 if (NULL != localMatrix) {
200 localMatrix->mapPointsWithStride(coords, vsize, 4);
204 if (colorOffset >= 0) {
// Write the saved constant color into each of the 4 vertices (the write of
// *vertColor itself is missing from this capture).
205 GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
206 for (int i = 0; i < 4; ++i) {
208 vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
// One instance of a 4-vertex / 6-index quad.
212 this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
213 this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);
215 // to ensure that stashing the drawState ptr is valid
216 SkASSERT(this->drawState() == drawState);
// Fast containment test: returns true when devBounds is provably inside the
// current clip, allowing the clip to be disabled for that draw. Caches a
// rect proxy of the clip (fClipProxy) when the clip is a simple rect.
219 bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
220 if (!this->getDrawState().isClipState()) {
223 if (kUnknown_ClipProxyState == fClipProxyState) {
// Query the clip stack's conservative bounds; iior ("is intersection of
// rects") tells us whether the clip is a single rect.
226 this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
228 // The clip is a rect. We will remember that in fProxyClip. It is common for an edge (or
229 // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
230 // free via the viewport. We don't want to think that clipping must be enabled in this
231 // case. So we extend the clip outward from the edge to avoid these false negatives.
232 fClipProxyState = kValid_ClipProxyState;
233 fClipProxy = SkRect::Make(rect);
// Stretch proxy edges that touch the RT border out to +/-infinity-ish.
235 if (fClipProxy.fLeft <= 0) {
236 fClipProxy.fLeft = SK_ScalarMin;
238 if (fClipProxy.fTop <= 0) {
239 fClipProxy.fTop = SK_ScalarMin;
241 if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
242 fClipProxy.fRight = SK_ScalarMax;
244 if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
245 fClipProxy.fBottom = SK_ScalarMax;
// Non-rect clip: remember that the proxy can't be used.
248 fClipProxyState = kInvalid_ClipProxyState;
251 if (kValid_ClipProxyState == fClipProxyState) {
252 return fClipProxy.contains(devBounds);
// Fallback: translate devBounds into clip space and ask the clip stack.
254 SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
255 SkIntToScalar(this->getClip()->fOrigin.fY)};
256 SkRect clipSpaceBounds = devBounds;
257 clipSpaceBounds.offset(originOffset);
258 return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
// Attempts to merge an instanced draw into the previously recorded draw
// command. Returns the number of instances successfully concatenated
// (0 when merging is not possible; early-return lines are missing from this
// capture).
261 int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
262 SkASSERT(info.isInstanced());
264 const GeometrySrcState& geomSrc = this->getGeomSrc();
265 const GrDrawState& drawState = this->getDrawState();
267 // we only attempt to concat the case when reserved verts are used with a client-specified index
268 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
270 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
271 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
274 // Check if there is a draw info that is compatible that uses the same VB from the pool and
276 if (kDraw_Cmd != strip_trace_bit(fCmds.back())) {
280 DrawRecord* draw = &fDraws.back();
281 GeometryPoolState& poolState = fGeoPoolStateStack.back();
282 const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;
// The previous draw must be instanced with identical per-instance geometry
// and use the same vertex and index buffers.
284 if (!draw->isInstanced() ||
285 draw->verticesPerInstance() != info.verticesPerInstance() ||
286 draw->indicesPerInstance() != info.indicesPerInstance() ||
287 draw->fVertexBuffer != vertexBuffer ||
288 draw->fIndexBuffer != geomSrc.fIndexBuffer) {
291 // info does not yet account for the offset from the start of the pool's VB while the previous
// New vertices must start exactly where the previous draw's ended.
293 int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
294 if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
298 SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());
300 // how many instances can be concat'ed onto draw given the size of the index buffer
301 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
302 instancesToConcat -= draw->instanceCount();
303 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
305 // update the amount of reserved vertex data actually referenced in draws
306 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
307 drawState.getVertexSize();
308 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
310 draw->adjustInstanceCount(instancesToConcat);
312 // update last fGpuCmdMarkers to include any additional trace markers that have been added
313 if (this->getActiveTraceMarkers().count() > 0) {
314 if (cmd_has_trace_marker(fCmds.back())) {
315 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
// else: first marker set for this cmd — record it and flag the cmd byte.
317 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
318 fCmds.back() = add_trace_bit(fCmds.back());
322 return instancesToConcat;
// RAII helper: temporarily disables the clip state bit on a draw state and
// restores it on destruction. Used by onDraw() when a draw is provably
// inside the clip.
325 class AutoClipReenable {
327 AutoClipReenable() : fDrawState(NULL) {}
328 ~AutoClipReenable() {
329 if (NULL != fDrawState) {
330 fDrawState->enableState(GrDrawState::kClip_StateBit);
// Disables clipping on drawState (only if currently enabled) and remembers
// it for re-enabling in the destructor.
333 void set(GrDrawState* drawState) {
334 if (drawState->isClipState()) {
335 fDrawState = drawState;
336 drawState->disableState(GrDrawState::kClip_StateBit);
// Non-owning; NULL means "nothing to restore".
340 GrDrawState* fDrawState;
// Records a draw: emits state/clip commands if needed, tries to concatenate
// instanced draws, then records vertex/index sources and ref's the buffers.
// NOTE(review): recordClip()/recordState() calls and some braces are missing
// from this capture.
343 void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {
345 GeometryPoolState& poolState = fGeoPoolStateStack.back();
346 const GrDrawState& drawState = this->getDrawState();
347 AutoClipReenable acr;
// Skip clipping entirely when the draw's device bounds are inside the clip.
349 if (drawState.isClipState() &&
350 NULL != info.getDevBounds() &&
351 this->quickInsideClip(*info.getDevBounds())) {
352 acr.set(this->drawState());
355 if (this->needsNewClip()) {
358 if (this->needsNewState()) {
// Instanced draws may partially merge with the previous draw record.
363 if (info.isInstanced()) {
364 int instancesConcated = this->concatInstancedDraw(info);
365 if (info.instanceCount() > instancesConcated) {
366 draw = this->recordDraw(info);
367 draw->adjustInstanceCount(-instancesConcated);
// else (non-instanced): always record a new draw.
372 draw = this->recordDraw(info);
375 switch (this->getGeomSrc().fVertexSrc) {
376 case kBuffer_GeometrySrcType:
377 draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
379 case kReserved_GeometrySrcType: // fallthrough
380 case kArray_GeometrySrcType: {
// Track how much of the reserved pool space draws actually reference.
381 size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
382 drawState.getVertexSize();
383 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
384 draw->fVertexBuffer = poolState.fPoolVertexBuffer;
385 draw->adjustStartVertex(poolState.fPoolStartVertex);
389 SkFAIL("unknown geom src type");
// The draw record owns a ref on its vertex buffer (released in reset()).
391 draw->fVertexBuffer->ref();
393 if (info.isIndexed()) {
394 switch (this->getGeomSrc().fIndexSrc) {
395 case kBuffer_GeometrySrcType:
396 draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
398 case kReserved_GeometrySrcType: // fallthrough
399 case kArray_GeometrySrcType: {
// Indices are uint16_t in this path.
400 size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
401 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, indexBytes);
402 draw->fIndexBuffer = poolState.fPoolIndexBuffer;
403 draw->adjustStartIndex(poolState.fPoolStartIndex);
407 SkFAIL("unknown geom src type");
409 draw->fIndexBuffer->ref();
// Non-indexed draws carry no index buffer.
411 draw->fIndexBuffer = NULL;
// Default constructors for the recorded-command structs.
415 GrInOrderDrawBuffer::StencilPath::StencilPath() {}
416 GrInOrderDrawBuffer::DrawPath::DrawPath() {}
417 GrInOrderDrawBuffer::DrawPaths::DrawPaths() {}
// DrawPaths owns its transform array and a ref on each path; release here.
// NOTE(review): the per-path unref inside the loop is missing from this
// capture.
418 GrInOrderDrawBuffer::DrawPaths::~DrawPaths() {
420 SkDELETE_ARRAY(fTransforms);
422 for (int i = 0; i < fPathCount; ++i) {
425 SkDELETE_ARRAY(fPaths);
// Records a stencil-path command (clip/state commands emitted first if
// changed). The fill-mode assignment line is missing from this capture.
428 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
429 if (this->needsNewClip()) {
432 // Only compare the subset of GrDrawState relevant to path stenciling?
433 if (this->needsNewState()) {
436 StencilPath* sp = this->recordStencilPath();
// SkAutoTUnref-style reset: the record takes a ref on the path.
437 sp->fPath.reset(path);
// Records a covered-path draw, including an optional destination-copy
// texture used when the effect reads the dst.
442 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
443 SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
444 if (this->needsNewClip()) {
447 // TODO: Only compare the subset of GrDrawState relevant to path covering?
448 if (this->needsNewState()) {
451 DrawPath* cp = this->recordDrawPath();
452 cp->fPath.reset(path);
// Copy the dst-texture descriptor only when one was supplied.
455 if (NULL != dstCopy) {
456 cp->fDstCopy = *dstCopy;
// Records a multi-path draw. Deep-copies the path-pointer array (ref'ing
// each path) and the transform array; DrawPaths' destructor releases both.
460 void GrInOrderDrawBuffer::onDrawPaths(int pathCount, const GrPath** paths,
461 const SkMatrix* transforms,
462 SkPath::FillType fill,
463 SkStrokeRec::Style stroke,
464 const GrDeviceCoordTexture* dstCopy) {
467 if (this->needsNewClip()) {
470 if (this->needsNewState()) {
473 DrawPaths* dp = this->recordDrawPaths();
474 dp->fPathCount = pathCount;
475 dp->fPaths = SkNEW_ARRAY(const GrPath*, pathCount);
476 memcpy(dp->fPaths, paths, sizeof(GrPath*) * pathCount);
// Each recorded path is kept alive until playback.
477 for (int i = 0; i < pathCount; ++i) {
478 dp->fPaths[i]->ref();
481 dp->fTransforms = SkNEW_ARRAY(SkMatrix, pathCount);
482 memcpy(dp->fTransforms, transforms, sizeof(SkMatrix) * pathCount);
// NOTE(review): the fFill assignment is missing from this capture.
485 dp->fStroke = stroke;
487 if (NULL != dstCopy) {
488 dp->fDstCopy = *dstCopy;
// Records a clear command. A NULL rect means "clear the whole target";
// NULL renderTarget means "use the draw state's current target".
492 void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
493 bool canIgnoreRect, GrRenderTarget* renderTarget) {
495 if (NULL == renderTarget) {
496 renderTarget = this->drawState()->getRenderTarget();
497 SkASSERT(NULL != renderTarget);
500 // We could do something smart and remove previous draws and clears to
501 // the current render target. If we get that smart we have to make sure
502 // those draws aren't read before this clear (render-to-texture).
503 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
506 Clear* clr = this->recordClear();
// Colors recorded here must be premultiplied.
507 GrColorIsPMAssert(color);
// NOTE(review): fColor/fRect assignments are missing from this capture.
510 clr->fCanIgnoreRect = canIgnoreRect;
511 clr->fRenderTarget = renderTarget;
// Records a discard as a Clear command carrying the sentinel color
// GrColor_ILLEGAL; flush() translates that back into fDstGpu->discard().
515 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
// No-op when the backend can't discard render targets.
516 if (!this->caps()->discardRenderTargetSupport()) {
519 if (NULL == renderTarget) {
520 renderTarget = this->drawState()->getRenderTarget();
521 SkASSERT(NULL != renderTarget);
523 Clear* clr = this->recordClear();
524 clr->fColor = GrColor_ILLEGAL;
525 clr->fRenderTarget = renderTarget;
// Drops all recorded commands and releases the buffer refs taken in
// onDraw(). Requires a balanced geometry-source stack (exactly one entry).
529 void GrInOrderDrawBuffer::reset() {
530 SkASSERT(1 == fGeoPoolStateStack.count());
531 this->resetVertexSource();
532 this->resetIndexSource();
533 int numDraws = fDraws.count();
534 for (int d = 0; d < numDraws; ++d) {
535 // we always have a VB, but not always an IB
536 SkASSERT(NULL != fDraws[d].fVertexBuffer);
537 fDraws[d].fVertexBuffer->unref();
538 SkSafeUnref(fDraws[d].fIndexBuffer);
// Clear the recorded-command arrays.
// NOTE(review): resets of fCmds/fDraws/fStates/fClears etc. appear to be
// missing from this capture; only the ones below are visible.
542 fStencilPaths.reset();
550 fClipOrigins.reset();
551 fCopySurfaces.reset();
552 fGpuCmdMarkers.reset();
// Plays back all recorded commands onto fDstGpu, then resets the buffer.
// Reentry is guarded via fFlushing (restored by GrAutoTRestore).
// NOTE(review): many lines are missing from this capture (reentry guard,
// early-out, index-pool unlock, several case labels/breaks, final reset).
556 void GrInOrderDrawBuffer::flush() {
// Reserved geometry must have been released before flushing.
561 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
562 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
564 int numCmds = fCmds.count();
569 GrAutoTRestore<bool> flushRestore(&fFlushing);
// Unlock pools so their buffers can be bound for drawing.
572 fVertexPool.unlock();
// Save/restore the dst GPU's clip and geometry/state around playback.
575 GrDrawTarget::AutoClipRestore acr(fDstGpu);
576 AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);
// Playback uses a scratch draw state; the GPU's previous state is restored
// at the end of this function.
578 GrDrawState playbackState;
579 GrDrawState* prevDrawState = fDstGpu->drawState();
580 prevDrawState->ref();
581 fDstGpu->setDrawState(&playbackState);
// Per-command-type cursors into the parallel record arrays.
589 int currStencilPath = 0;
590 int currDrawPath = 0;
591 int currDrawPaths = 0;
592 int currCopySurface = 0;
593 int currCmdMarker = 0;
595 for (int c = 0; c < numCmds; ++c) {
// Push the command's trace marker (if any) around its execution.
596 GrGpuTraceMarker newMarker("", -1);
597 if (cmd_has_trace_marker(fCmds[c])) {
598 SkString traceString = fGpuCmdMarkers[currCmdMarker].toString();
599 newMarker.fMarker = traceString.c_str();
600 fDstGpu->addGpuTraceMarker(&newMarker);
603 switch (strip_trace_bit(fCmds[c])) {
// kDraw_Cmd case (label missing from this capture):
605 const DrawRecord& draw = fDraws[currDraw];
606 fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
607 if (draw.isIndexed()) {
608 fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
610 fDstGpu->executeDraw(draw);
614 case kStencilPath_Cmd: {
615 const StencilPath& sp = fStencilPaths[currStencilPath];
616 fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
620 case kDrawPath_Cmd: {
621 const DrawPath& cp = fDrawPath[currDrawPath];
622 fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
623 NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
627 case kDrawPaths_Cmd: {
628 DrawPaths& dp = fDrawPaths[currDrawPaths];
629 const GrDeviceCoordTexture* dstCopy =
630 NULL != dp.fDstCopy.texture() ? &dp.fDstCopy : NULL;
631 fDstGpu->executeDrawPaths(dp.fPathCount, dp.fPaths,
632 dp.fTransforms, dp.fFill, dp.fStroke,
// kSetState_Cmd case (label missing from this capture):
638 fStates[currState].restoreTo(&playbackState);
// kSetClip_Cmd case (label missing from this capture):
642 clipData.fClipStack = &fClips[currClip];
643 clipData.fOrigin = fClipOrigins[currClip];
644 fDstGpu->setClip(&clipData);
// kClear_Cmd case: GrColor_ILLEGAL marks a discard (see discard()).
648 if (GrColor_ILLEGAL == fClears[currClear].fColor) {
649 fDstGpu->discard(fClears[currClear].fRenderTarget);
651 fDstGpu->clear(&fClears[currClear].fRect,
652 fClears[currClear].fColor,
653 fClears[currClear].fCanIgnoreRect,
654 fClears[currClear].fRenderTarget);
658 case kCopySurface_Cmd:
659 fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
660 fCopySurfaces[currCopySurface].fSrc.get(),
661 fCopySurfaces[currCopySurface].fSrcRect,
662 fCopySurfaces[currCopySurface].fDstPoint);
// Pop the trace marker pushed before the switch.
666 if (cmd_has_trace_marker(fCmds[c])) {
667 fDstGpu->removeGpuTraceMarker(&newMarker);
670 // we should have consumed all the states, clips, etc.
671 SkASSERT(fStates.count() == currState);
672 SkASSERT(fClips.count() == currClip);
673 SkASSERT(fClipOrigins.count() == currClip);
674 SkASSERT(fClears.count() == currClear);
675 SkASSERT(fDraws.count() == currDraw);
676 SkASSERT(fCopySurfaces.count() == currCopySurface);
677 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
// Restore the GPU's original draw state and drop our extra ref.
679 fDstGpu->setDrawState(prevDrawState);
680 prevDrawState->unref();
// Records a surface copy if the dst GPU supports it; the record takes refs
// on both surfaces. Returns true on success (the return lines and the src
// parameter line are missing from this capture).
685 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
687 const SkIRect& srcRect,
688 const SkIPoint& dstPoint) {
689 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
690 CopySurface* cs = this->recordCopySurface();
691 cs->fDst.reset(SkRef(dst));
692 cs->fSrc.reset(SkRef(src));
693 cs->fSrcRect = srcRect;
694 cs->fDstPoint = dstPoint;
// Copy-capability and dst-descriptor queries simply forward to the dst GPU.
// (The src parameter line is missing from this capture.)
701 bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
703 const SkIRect& srcRect,
704 const SkIPoint& dstPoint) {
705 return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
// Forwards dst-descriptor initialization for copySurface to the dst GPU.
708 void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
709 fDstGpu->initCopySurfaceDstDesc(src, desc);
// Decides whether to flush before a reservation: flushing is only legal
// when no geometry push is outstanding, no reserved space is still in use,
// and the hint machinery recommends it. (The indexCount parameter line and
// the flush call are missing from this capture.)
712 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
714 // We use geometryHints() to know whether to flush the draw buffer. We
715 // can't flush if we are inside an unbalanced pushGeometrySource.
716 // Moreover, flushing blows away vertex and index data that was
717 // previously reserved. So if the vertex or index data is pulled from
718 // reserved space and won't be released by this request then we can't
720 bool insideGeoPush = fGeoPoolStateStack.count() > 1;
722 bool unreleasedVertexSpace =
724 kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;
726 bool unreleasedIndexSpace =
728 kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;
730 // we don't want to finalize any reserved geom on the target since
731 // we don't know that the client has finished writing to it.
732 bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();
734 int vcount = vertexCount;
735 int icount = indexCount;
737 if (!insideGeoPush &&
738 !unreleasedVertexSpace &&
739 !unreleasedIndexSpace &&
740 !targetHasReservedGeom &&
741 this->geometryHints(&vcount, &icount)) {
// Recommends whether to flush (return value; the returns themselves are
// missing from this capture) and writes back the vertex/index counts that
// fit in the pools' current buffers.
747 bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
748 int* indexCount) const {
749 // we will recommend a flush if the data could fit in a single
750 // preallocated buffer but none are left and it can't fit
751 // in the current buffer (which may not be prealloced).
753 if (NULL != indexCount) {
754 int32_t currIndices = fIndexPool.currentBufferIndices();
755 if (*indexCount > currIndices &&
756 (!fIndexPool.preallocatedBuffersRemaining() &&
757 *indexCount <= fIndexPool.preallocatedBufferIndices())) {
// Report how many indices fit without a new allocation.
761 *indexCount = currIndices;
763 if (NULL != vertexCount) {
// Vertex capacity depends on the current draw state's vertex stride.
764 size_t vertexSize = this->getDrawState().getVertexSize();
765 int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
766 if (*vertexCount > currVertices &&
767 (!fVertexPool.preallocatedBuffersRemaining() &&
768 *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {
772 *vertexCount = currVertices;
// Reserves vertex space from the vertex pool, recording the pool buffer and
// start vertex in the current pool state. (The vertexCount/vertices
// parameter lines are missing from this capture.)
777 bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
780 GeometryPoolState& poolState = fGeoPoolStateStack.back();
781 SkASSERT(vertexCount > 0);
782 SkASSERT(NULL != vertices);
// A fresh reservation requires that no prior reserved bytes are in use.
783 SkASSERT(0 == poolState.fUsedPoolVertexBytes);
785 *vertices = fVertexPool.makeSpace(vertexSize,
787 &poolState.fPoolVertexBuffer,
788 &poolState.fPoolStartVertex);
789 return NULL != *vertices;
// Same scheme for index space: reserve from the index pool and remember
// where the reservation starts.
792 bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
793 GeometryPoolState& poolState = fGeoPoolStateStack.back();
794 SkASSERT(indexCount > 0);
795 SkASSERT(NULL != indices);
796 SkASSERT(0 == poolState.fUsedPoolIndexBytes);
798 *indices = fIndexPool.makeSpace(indexCount,
799 &poolState.fPoolIndexBuffer,
800 &poolState.fPoolStartIndex);
801 return NULL != *indices;
// Returns unused tail bytes of a vertex reservation to the pool and clears
// the pool-state bookkeeping.
804 void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
805 GeometryPoolState& poolState = fGeoPoolStateStack.back();
806 const GeometrySrcState& geoSrc = this->getGeomSrc();
808 // If we get a release vertex space call then our current source should either be reserved
809 // or array (which we copied into reserved space).
810 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
811 kArray_GeometrySrcType == geoSrc.fVertexSrc);
813 // When the caller reserved vertex buffer space we gave it back a pointer
814 // provided by the vertex buffer pool. At each draw we tracked the largest
815 // offset into the pool's pointer that was referenced. Now we return to the
816 // pool any portion at the tail of the allocation that no draw referenced.
817 size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
818 fVertexPool.putBack(reservedVertexBytes -
819 poolState.fUsedPoolVertexBytes);
820 poolState.fUsedPoolVertexBytes = 0;
821 poolState.fPoolVertexBuffer = NULL;
822 poolState.fPoolStartVertex = 0;
// Index-space counterpart of the function above.
825 void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
826 GeometryPoolState& poolState = fGeoPoolStateStack.back();
827 const GeometrySrcState& geoSrc = this->getGeomSrc();
829 // If we get a release index space call then our current source should either be reserved
830 // or array (which we copied into reserved space).
831 SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
832 kArray_GeometrySrcType == geoSrc.fIndexSrc);
834 // Similar to releaseReservedVertexSpace we return any unused portion at
// Indices are 16-bit.
836 size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
837 fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
838 poolState.fUsedPoolIndexBytes = 0;
839 poolState.fPoolIndexBuffer = NULL;
840 poolState.fPoolStartIndex = 0;
// Client-array vertex sources are handled by copying the array into pool
// space. (The vertexCount parameter line and the `bool success =` prefix
// appear to be missing from this capture.)
843 void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
846 GeometryPoolState& poolState = fGeoPoolStateStack.back();
847 SkASSERT(0 == poolState.fUsedPoolVertexBytes);
851 fVertexPool.appendVertices(this->getVertexSize(),
854 &poolState.fPoolVertexBuffer,
855 &poolState.fPoolStartVertex);
856 GR_DEBUGASSERT(success);
// Index-array counterpart: copy client indices into the index pool.
859 void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
861 GeometryPoolState& poolState = fGeoPoolStateStack.back();
862 SkASSERT(0 == poolState.fUsedPoolIndexBytes);
866 fIndexPool.appendIndices(indexCount,
868 &poolState.fPoolIndexBuffer,
869 &poolState.fPoolStartIndex);
870 GR_DEBUGASSERT(success);
// Array sources were copied into reserved pool space (see
// onSet*SourceToArray), so releasing them is the same as releasing a
// reservation. The qualified calls pin this class's implementation.
873 void GrInOrderDrawBuffer::releaseVertexArray() {
874 // When the client provides an array as the vertex source we handled it
875 // by copying their array into reserved space.
876 this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
879 void GrInOrderDrawBuffer::releaseIndexArray() {
880 // When the client provides an array as the index source we handled it
881 // by copying their array into reserved space.
882 this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
// Pushes a fresh pool-state entry when the geometry source is pushed;
// initialized the same way as in the constructor (poison pointers in what
// are presumably debug-only fields).
885 void GrInOrderDrawBuffer::geometrySourceWillPush() {
886 GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
887 poolState.fUsedPoolVertexBytes = 0;
888 poolState.fUsedPoolIndexBytes = 0;
890 poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
891 poolState.fPoolStartVertex = ~0;
892 poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
893 poolState.fPoolStartIndex = ~0;
// Pops the pool-state entry and conservatively marks the restored source's
// entire reservation as used (it can no longer be shrunk safely).
897 void GrInOrderDrawBuffer::geometrySourceWillPop(
898 const GeometrySrcState& restoredState) {
899 SkASSERT(fGeoPoolStateStack.count() > 1);
900 fGeoPoolStateStack.pop_back();
901 GeometryPoolState& poolState = fGeoPoolStateStack.back();
902 // we have to assume that any slack we had in our vertex/index data
903 // is now unreleasable because data may have been appended later in the
905 if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
906 kArray_GeometrySrcType == restoredState.fVertexSrc) {
907 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
909 if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
910 kArray_GeometrySrcType == restoredState.fIndexSrc) {
911 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
912 restoredState.fIndexCount;
// A new state command is needed when none has been recorded yet or the
// current draw state differs from the last recorded one.
916 bool GrInOrderDrawBuffer::needsNewState() const {
917 return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
// A new clip command is needed only when clipping is enabled and the clip
// stack or origin differs from the last recorded pair. (The fClips.empty()
// arm and returns are missing from this capture.)
920 bool GrInOrderDrawBuffer::needsNewClip() const {
921 SkASSERT(fClips.count() == fClipOrigins.count());
922 if (this->getDrawState().isClipState()) {
925 fClips.back() != *this->getClip()->fClipStack ||
926 fClipOrigins.back() != this->getClip()->fOrigin)) {
// Appends a command byte, tagging it with the trace bit and recording the
// active trace-marker set when one exists.
933 void GrInOrderDrawBuffer::addToCmdBuffer(uint8_t cmd) {
934 SkASSERT(!cmd_has_trace_marker(cmd));
935 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
936 if (activeTraceMarkers.count() > 0) {
937 fCmds.push_back(add_trace_bit(cmd));
938 fGpuCmdMarkers.push_back(activeTraceMarkers);
// else branch (brace line missing from this capture): plain command byte.
940 fCmds.push_back(cmd);
// record*() helpers: each appends the matching command byte and pushes a
// record onto the corresponding parallel array, returning it for the caller
// to fill in.
944 void GrInOrderDrawBuffer::recordClip() {
945 fClips.push_back(*this->getClip()->fClipStack);
946 fClipOrigins.push_back() = this->getClip()->fOrigin;
948 this->addToCmdBuffer(kSetClip_Cmd);
// Snapshots the current draw state.
951 void GrInOrderDrawBuffer::recordState() {
952 fStates.push_back().saveFrom(this->getDrawState());
953 this->addToCmdBuffer(kSetState_Cmd);
956 GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
957 this->addToCmdBuffer(kDraw_Cmd);
958 return &fDraws.push_back(info);
961 GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
962 this->addToCmdBuffer(kStencilPath_Cmd);
963 return &fStencilPaths.push_back();
966 GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
967 this->addToCmdBuffer(kDrawPath_Cmd);
968 return &fDrawPath.push_back();
971 GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths() {
972 this->addToCmdBuffer(kDrawPaths_Cmd);
973 return &fDrawPaths.push_back();
976 GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
977 this->addToCmdBuffer(kClear_Cmd);
978 return &fClears.push_back();
981 GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
982 this->addToCmdBuffer(kCopySurface_Cmd);
983 return &fCopySurfaces.push_back();
// A new clip invalidates the cached rect proxy used by quickInsideClip().
987 void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
988 INHERITED::clipWillBeSet(newClipData);
990 fClipProxyState = kUnknown_ClipProxyState;