// This class is used to manage conversion of refs to pending reads/writes.
friend class GrGpuResourceRef;
+ template <typename T> friend class GrPendingIOResource;
};
/**
class GrGpuResource;
/**
+ * This class is intended only for internal use in core Gr code.
+ *
* Class that wraps a resource referenced by a GrProgramElement or GrDrawState. It manages
- * converting refs to pending io operations. Like SkAutoTUnref, its constructor and setter adopt
- * a ref from their caller. This class is intended only for internal use in core Gr code.
+ * converting refs to pending IO operations. It allows resource ownership to be in one of three
+ * states:
+ * 1. Owns a single ref.
+ * 2. Owns a single ref and a pending IO operation (read, write, or read-write).
+ * 3. Owns a single pending IO operation.
+ *
+ * It is legal to destroy the GrGpuResourceRef in any of these states. It starts in state 1.
+ * Calling markPendingIO() converts it from state 1 to state 2. Calling removeRef() moves it from
+ * state 2 to state 3. Calling pendingIOComplete() returns it from state 2 to state 1. There is
+ * no valid way of going from state 3 back to state 2 or 1.
+ *
+ * Like SkAutoTUnref, its constructor and setter adopt a ref from their caller.
+ *
+ * TODO: Once GrRODrawState no longer exists and therefore GrDrawState and GrOptDrawState no
+ * longer share an instance of this class, attempt to make the resource owned by GrGpuResourceRef
+ * settable only via the constructor.
*/
class GrGpuResourceRef : SkNoncopyable {
public:
private:
/** Called by owning GrProgramElement when the program element is first scheduled for
- execution. */
+ execution. It can only be called once. */
void markPendingIO() const;
/** Called when the program element/draw state is no longer owned by GrDrawTarget-client code.
This lets the cache know that the drawing code will no longer schedule additional reads or
- writes to the resource using the program element or draw state. */
+ writes to the resource using the program element or draw state. It can only be called once.
+ */
void removeRef() const;
/** Called to indicate that the previous pending IO is complete. Useful when the owning object
still has refs, so it is not about to destroy this GrGpuResourceRef, but its previously
- pending executions have been complete.
- */
+ pending executions have completed. Can only be called if removeRef() was not previously
+ called. */
void pendingIOComplete() const;
friend class GrRODrawState;
typedef SkNoncopyable INHERITED;
};
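+// A minimal sketch of the state machine described above, as driven by the owning program
+// element / draw state (markPendingIO, removeRef, and pendingIOComplete are private and are
+// reached through the friend declarations):
+//
+//   GrGpuResourceRef ref(...);   // constructor adopts a ref from the caller -> state 1
+//   ref.markPendingIO();         // state 1 -> state 2 (ref + pending IO)
+//   ref.removeRef();             // state 2 -> state 3 (pending IO only)
+//   // or, if the owner keeps its refs instead of dropping them:
+//   ref.pendingIOComplete();     // state 2 -> state 1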
+/**
+ * Templated version of GrGpuResourceRef to enforce type safety.
+ */
template <typename T> class GrTGpuResourceRef : public GrGpuResourceRef {
public:
GrTGpuResourceRef() {}
typedef GrGpuResourceRef INHERITED;
};
+/**
+ * This is similar to GrTGpuResourceRef but can only be in the pending IO state. It never owns a
+ * ref.
+ */
+template <typename T> class GrPendingIOResource : SkNoncopyable {
+public:
+ typedef GrGpuResourceRef::IOType IOType;
+ GrPendingIOResource(T* resource, IOType ioType) : fResource(resource), fIOType(ioType) {
+ if (NULL != fResource) {
+ switch (fIOType) {
+ case GrGpuResourceRef::kNone_IOType:
+ SkFAIL("GrPendingIOResource with neither reads nor writes?");
+ break;
+ case GrGpuResourceRef::kRead_IOType:
+ fResource->addPendingRead();
+ break;
+ case GrGpuResourceRef::kWrite_IOType:
+ fResource->addPendingWrite();
+ break;
+ case GrGpuResourceRef::kRW_IOType:
+ fResource->addPendingRead();
+ fResource->addPendingWrite();
+ break;
+ }
+ }
+ }
+
+ ~GrPendingIOResource() {
+ if (NULL != fResource) {
+ switch (fIOType) {
+ case GrGpuResourceRef::kNone_IOType:
+ SkFAIL("GrPendingIOResource with neither reads nor writes?");
+ break;
+ case GrGpuResourceRef::kRead_IOType:
+ fResource->completedRead();
+ break;
+ case GrGpuResourceRef::kWrite_IOType:
+ fResource->completedWrite();
+ break;
+ case GrGpuResourceRef::kRW_IOType:
+ fResource->completedRead();
+ fResource->completedWrite();
+ break;
+ }
+ }
+ }
+
+ T* get() const { return fResource; }
+
+private:
+ T* fResource;
+ IOType fIOType;
+};
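+// A minimal sketch of the intended use, mirroring the command records added to
+// GrInOrderDrawBuffer below: a record holds pending IO on a resource for its own lifetime
+// without ever taking a ref (ClearRecord here is a hypothetical example type).
+//
+//   struct ClearRecord {
+//       ClearRecord(GrRenderTarget* rt)
+//           : fRenderTarget(rt, GrGpuResourceRef::kWrite_IOType) {}
+//       GrPendingIOResource<GrRenderTarget> fRenderTarget;
+//   };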
#endif
typedef SkNoncopyable INHERITED;
};
-template <typename T>
-class GrTAllocator : SkNoncopyable {
+template <typename T> class GrTAllocator;
+template <typename T> void* operator new(size_t, GrTAllocator<T>*);
+
+template <typename T> class GrTAllocator : SkNoncopyable {
public:
virtual ~GrTAllocator() { this->reset(); };
}
private:
+ friend void* operator new<T>(size_t, GrTAllocator*);
+
GrAllocator fAllocator;
typedef SkNoncopyable INHERITED;
};
SkAlignedSTStorage<N, T> fStorage;
};
+template <typename T> void* operator new(size_t size, GrTAllocator<T>* allocator) {
+ return allocator->fAllocator.push_back();
+}
+
+// Skia doesn't use C++ exceptions but it may be compiled with them enabled. Having an op delete
+// to match the op new silences warnings about missing op delete when a constructor throws an
+// exception.
+template <typename T> void operator delete(void*, GrTAllocator<T>*) {
+ SK_CRASH();
+}
+
+#define GrNEW_APPEND_TO_ALLOCATOR(allocator_ptr, type_name, args) \
+ new (allocator_ptr) type_name args
+
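+// A minimal sketch of how the macro is used (it mirrors the record* methods in
+// GrInOrderDrawBuffer below): the object is constructed in place in the allocator's storage
+// instead of being copied into it, so noncopyable record types can be appended.
+//
+//   GrSTAllocator<4, Clear> fClears;                               // hypothetical allocator
+//   Clear* clr = GrNEW_APPEND_TO_ALLOCATOR(&fClears, Clear, (rt)); // constructs Clear(rt) in place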
#endif
fResource->addPendingRead();
fResource->addPendingWrite();
break;
-
}
}
#include "GrDrawTargetCaps.h"
#include "GrTextStrike.h"
#include "GrGpu.h"
-#include "GrIndexBuffer.h"
-#include "GrPath.h"
-#include "GrPathRange.h"
-#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
-#include "GrVertexBuffer.h"
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
GrVertexBufferAllocPool* vertexPool,
return 0;
}
- DrawRecord* draw = &fDraws.back();
+ Draw* draw = &fDraws.back();
GeometryPoolState& poolState = fGeoPoolStateStack.back();
const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;
if (!draw->isInstanced() ||
draw->verticesPerInstance() != info.verticesPerInstance() ||
draw->indicesPerInstance() != info.indicesPerInstance() ||
- draw->fVertexBuffer != vertexBuffer ||
- draw->fIndexBuffer != geomSrc.fIndexBuffer) {
+ draw->vertexBuffer() != vertexBuffer ||
+ draw->indexBuffer() != geomSrc.fIndexBuffer) {
return 0;
}
// info does not yet account for the offset from the start of the pool's VB while the previous
}
this->recordStateIfNecessary();
- DrawRecord* draw;
- if (info.isInstanced()) {
- int instancesConcated = this->concatInstancedDraw(info);
- if (info.instanceCount() > instancesConcated) {
- draw = this->recordDraw(info);
- draw->adjustInstanceCount(-instancesConcated);
- } else {
- return;
- }
+ const GrVertexBuffer* vb;
+ if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) {
+ vb = this->getGeomSrc().fVertexBuffer;
} else {
- draw = this->recordDraw(info);
+ vb = poolState.fPoolVertexBuffer;
}
- switch (this->getGeomSrc().fVertexSrc) {
- case kBuffer_GeometrySrcType:
- draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
- break;
- case kReserved_GeometrySrcType: // fallthrough
- case kArray_GeometrySrcType: {
- size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
- drawState.getVertexStride();
- poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
- draw->fVertexBuffer = poolState.fPoolVertexBuffer;
- draw->adjustStartVertex(poolState.fPoolStartVertex);
- break;
+ const GrIndexBuffer* ib = NULL;
+ if (info.isIndexed()) {
+ if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) {
+ ib = this->getGeomSrc().fIndexBuffer;
+ } else {
+ ib = poolState.fPoolIndexBuffer;
}
- default:
- SkFAIL("unknown geom src type");
}
- draw->fVertexBuffer->ref();
- if (info.isIndexed()) {
- switch (this->getGeomSrc().fIndexSrc) {
- case kBuffer_GeometrySrcType:
- draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
- break;
- case kReserved_GeometrySrcType: // fallthrough
- case kArray_GeometrySrcType: {
- size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
- poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, indexBytes);
- draw->fIndexBuffer = poolState.fPoolIndexBuffer;
- draw->adjustStartIndex(poolState.fPoolStartIndex);
- break;
- }
- default:
- SkFAIL("unknown geom src type");
+ Draw* draw;
+ if (info.isInstanced()) {
+ int instancesConcated = this->concatInstancedDraw(info);
+ if (info.instanceCount() > instancesConcated) {
+ draw = this->recordDraw(info, vb, ib);
+ draw->adjustInstanceCount(-instancesConcated);
+ } else {
+ return;
}
- draw->fIndexBuffer->ref();
} else {
- draw->fIndexBuffer = NULL;
+ draw = this->recordDraw(info, vb, ib);
}
-}
-GrInOrderDrawBuffer::StencilPath::StencilPath() {}
-GrInOrderDrawBuffer::DrawPath::DrawPath() {}
-GrInOrderDrawBuffer::DrawPaths::DrawPaths() {}
-GrInOrderDrawBuffer::DrawPaths::~DrawPaths() {
- if (fTransforms) {
- SkDELETE_ARRAY(fTransforms);
+ // Adjust the starting vertex and index when we are using reserved or array sources to
+ // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
+ if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
+ size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
+ poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
+ draw->adjustStartVertex(poolState.fPoolStartVertex);
}
- if (fIndices) {
- SkDELETE_ARRAY(fIndices);
+
+ if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
+ size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
+ poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
+ draw->adjustStartIndex(poolState.fPoolStartIndex);
}
}
}
// Only compare the subset of GrDrawState relevant to path stenciling?
this->recordStateIfNecessary();
- StencilPath* sp = this->recordStencilPath();
- sp->fPath.reset(path);
- path->ref();
+ StencilPath* sp = this->recordStencilPath(path);
sp->fFill = fill;
}
}
// TODO: Only compare the subset of GrDrawState relevant to path covering?
this->recordStateIfNecessary();
- DrawPath* cp = this->recordDrawPath();
- cp->fPath.reset(path);
- path->ref();
+ DrawPath* cp = this->recordDrawPath(path);
cp->fFill = fill;
if (dstCopy) {
cp->fDstCopy = *dstCopy;
this->recordClip();
}
this->recordStateIfNecessary();
- DrawPaths* dp = this->recordDrawPaths();
- dp->fPathRange.reset(SkRef(pathRange));
+ DrawPaths* dp = this->recordDrawPaths(pathRange);
dp->fIndices = SkNEW_ARRAY(uint32_t, count); // TODO: Accomplish this without a malloc
memcpy(dp->fIndices, indices, sizeof(uint32_t) * count);
dp->fCount = count;
r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
rect = &r;
}
- Clear* clr = this->recordClear();
+ Clear* clr = this->recordClear(renderTarget);
GrColorIsPMAssert(color);
clr->fColor = color;
clr->fRect = *rect;
clr->fCanIgnoreRect = canIgnoreRect;
- clr->fRenderTarget = renderTarget;
- renderTarget->ref();
}
void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
renderTarget = this->drawState()->getRenderTarget();
SkASSERT(renderTarget);
}
- Clear* clr = this->recordClear();
+ Clear* clr = this->recordClear(renderTarget);
clr->fColor = GrColor_ILLEGAL;
- clr->fRenderTarget = renderTarget;
- renderTarget->ref();
}
void GrInOrderDrawBuffer::reset() {
this->resetVertexSource();
this->resetIndexSource();
- DrawAllocator::Iter drawIter(&fDraws);
- while (drawIter.next()) {
- // we always have a VB, but not always an IB
- SkASSERT(drawIter->fVertexBuffer);
- drawIter->fVertexBuffer->unref();
- SkSafeUnref(drawIter->fIndexBuffer);
- }
fCmds.reset();
fDraws.reset();
fStencilPaths.reset();
case kDraw_Cmd: {
SkASSERT(fDstGpu->drawState() != prevDrawState);
SkAssertResult(drawIter.next());
- fDstGpu->setVertexSourceToBuffer(drawIter->fVertexBuffer);
+ fDstGpu->setVertexSourceToBuffer(drawIter->vertexBuffer());
if (drawIter->isIndexed()) {
- fDstGpu->setIndexSourceToBuffer(drawIter->fIndexBuffer);
+ fDstGpu->setIndexSourceToBuffer(drawIter->indexBuffer());
}
fDstGpu->executeDraw(*drawIter);
break;
case kStencilPath_Cmd: {
SkASSERT(fDstGpu->drawState() != prevDrawState);
SkAssertResult(stencilPathIter.next());
- fDstGpu->stencilPath(stencilPathIter->fPath.get(), stencilPathIter->fFill);
+ fDstGpu->stencilPath(stencilPathIter->path(), stencilPathIter->fFill);
break;
}
case kDrawPath_Cmd: {
SkASSERT(fDstGpu->drawState() != prevDrawState);
SkAssertResult(drawPathIter.next());
- fDstGpu->executeDrawPath(drawPathIter->fPath.get(), drawPathIter->fFill,
+ fDstGpu->executeDrawPath(drawPathIter->path(), drawPathIter->fFill,
drawPathIter->fDstCopy.texture() ?
&drawPathIter->fDstCopy :
NULL);
SkAssertResult(drawPathsIter.next());
const GrDeviceCoordTexture* dstCopy =
drawPathsIter->fDstCopy.texture() ? &drawPathsIter->fDstCopy : NULL;
- fDstGpu->executeDrawPaths(drawPathsIter->fPathRange.get(),
+ fDstGpu->executeDrawPaths(drawPathsIter->pathRange(),
drawPathsIter->fIndices,
drawPathsIter->fCount,
drawPathsIter->fTransforms,
case kClear_Cmd:
SkAssertResult(clearIter.next());
if (GrColor_ILLEGAL == clearIter->fColor) {
- fDstGpu->discard(clearIter->fRenderTarget);
+ fDstGpu->discard(clearIter->renderTarget());
} else {
fDstGpu->clear(&clearIter->fRect,
clearIter->fColor,
clearIter->fCanIgnoreRect,
- clearIter->fRenderTarget);
+ clearIter->renderTarget());
}
break;
case kCopySurface_Cmd:
SkAssertResult(copySurfaceIter.next());
- fDstGpu->copySurface(copySurfaceIter->fDst.get(),
- copySurfaceIter->fSrc.get(),
+ fDstGpu->copySurface(copySurfaceIter->dst(),
+ copySurfaceIter->src(),
copySurfaceIter->fSrcRect,
copySurfaceIter->fDstPoint);
break;
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
- CopySurface* cs = this->recordCopySurface();
- cs->fDst.reset(SkRef(dst));
- cs->fSrc.reset(SkRef(src));
+ CopySurface* cs = this->recordCopySurface(dst, src);
cs->fSrcRect = srcRect;
cs->fDstPoint = dstPoint;
return true;
poolState.fPoolStartIndex = 0;
}
-void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
- int vertexCount) {
-
+void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
GeometryPoolState& poolState = fGeoPoolStateStack.back();
SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
#endif
}
-void GrInOrderDrawBuffer::geometrySourceWillPop(
- const GeometrySrcState& restoredState) {
+void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) {
SkASSERT(fGeoPoolStateStack.count() > 1);
fGeoPoolStateStack.pop_back();
GeometryPoolState& poolState = fGeoPoolStateStack.back();
this->addToCmdBuffer(kSetClip_Cmd);
}
-GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
+GrInOrderDrawBuffer::Draw* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info,
+ const GrVertexBuffer* vb,
+ const GrIndexBuffer* ib) {
this->addToCmdBuffer(kDraw_Cmd);
- return &fDraws.push_back(info);
+ return GrNEW_APPEND_TO_ALLOCATOR(&fDraws, Draw, (info, vb, ib));
}
-GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
+GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath(const GrPath* path) {
this->addToCmdBuffer(kStencilPath_Cmd);
- return &fStencilPaths.push_back();
+ return GrNEW_APPEND_TO_ALLOCATOR(&fStencilPaths, StencilPath, (path));
}
-GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
+GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath(const GrPath* path) {
this->addToCmdBuffer(kDrawPath_Cmd);
- return &fDrawPath.push_back();
+ return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPath, DrawPath, (path));
}
-GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths() {
+GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths(const GrPathRange* pathRange) {
this->addToCmdBuffer(kDrawPaths_Cmd);
- return &fDrawPaths.push_back();
+ return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPaths, DrawPaths, (pathRange));
}
-GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
+GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear(GrRenderTarget* rt) {
this->addToCmdBuffer(kClear_Cmd);
- return &fClears.push_back();
+ return GrNEW_APPEND_TO_ALLOCATOR(&fClears, Clear, (rt));
}
-GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
+GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface(GrSurface* dst,
+ GrSurface* src) {
this->addToCmdBuffer(kCopySurface_Cmd);
- return &fCopySurfaces.push_back();
+ return GrNEW_APPEND_TO_ALLOCATOR(&fCopySurfaces, CopySurface, (dst, src));
}
void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
#include "GrDrawTarget.h"
#include "GrAllocPool.h"
#include "GrAllocator.h"
+#include "GrIndexBuffer.h"
+#include "GrRenderTarget.h"
#include "GrPath.h"
+#include "GrPathRange.h"
+#include "GrSurface.h"
+#include "GrVertexBuffer.h"
#include "SkClipStack.h"
#include "SkTemplates.h"
class GrGpu;
class GrIndexBufferAllocPool;
-class GrPathRange;
class GrVertexBufferAllocPool;
/**
kDrawPaths_Cmd = 8,
};
- class DrawRecord : public DrawInfo {
+ class Draw : public DrawInfo {
public:
- DrawRecord(const DrawInfo& info) : DrawInfo(info) {}
- const GrVertexBuffer* fVertexBuffer;
- const GrIndexBuffer* fIndexBuffer;
+ Draw(const DrawInfo& info, const GrVertexBuffer* vb, const GrIndexBuffer* ib)
+ : DrawInfo(info)
+ , fVertexBuffer(vb, GrGpuResourceRef::kRead_IOType)
+ , fIndexBuffer(ib, GrGpuResourceRef::kRead_IOType) {}
+
+ const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
+ const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); }
+
+ private:
+ GrPendingIOResource<const GrVertexBuffer> fVertexBuffer;
+ GrPendingIOResource<const GrIndexBuffer> fIndexBuffer;
};
struct StencilPath : public ::SkNoncopyable {
- StencilPath();
+ StencilPath(const GrPath* path) : fPath(path, GrGpuResourceRef::kRead_IOType) {}
+
+ const GrPath* path() const { return fPath.get(); }
- SkAutoTUnref<const GrPath> fPath;
- SkPath::FillType fFill;
+ SkPath::FillType fFill;
+
+ private:
+ GrPendingIOResource<const GrPath> fPath;
};
struct DrawPath : public ::SkNoncopyable {
- DrawPath();
+ DrawPath(const GrPath* path) : fPath(path, GrGpuResourceRef::kRead_IOType) {}
+
+ const GrPath* path() const { return fPath.get(); }
- SkAutoTUnref<const GrPath> fPath;
- SkPath::FillType fFill;
- GrDeviceCoordTexture fDstCopy;
+ SkPath::FillType fFill;
+ GrDeviceCoordTexture fDstCopy;
+
+ private:
+ GrPendingIOResource<const GrPath> fPath;
};
struct DrawPaths : public ::SkNoncopyable {
- DrawPaths();
- ~DrawPaths();
-
- SkAutoTUnref<const GrPathRange> fPathRange;
- uint32_t* fIndices;
- size_t fCount;
- float* fTransforms;
- PathTransformType fTransformsType;
- SkPath::FillType fFill;
- GrDeviceCoordTexture fDstCopy;
+ DrawPaths(const GrPathRange* pathRange)
+ : fPathRange(pathRange, GrGpuResourceRef::kRead_IOType) {}
+
+ ~DrawPaths() {
+ if (fTransforms) {
+ SkDELETE_ARRAY(fTransforms);
+ }
+ if (fIndices) {
+ SkDELETE_ARRAY(fIndices);
+ }
+ }
+
+ const GrPathRange* pathRange() const { return fPathRange.get(); }
+
+ uint32_t* fIndices;
+ size_t fCount;
+ float* fTransforms;
+ PathTransformType fTransformsType;
+ SkPath::FillType fFill;
+ GrDeviceCoordTexture fDstCopy;
+
+ private:
+ GrPendingIOResource<const GrPathRange> fPathRange;
};
// This is also used to record a discard by setting the color to GrColor_ILLEGAL
struct Clear : public ::SkNoncopyable {
- Clear() : fRenderTarget(NULL) {}
- ~Clear() { SkSafeUnref(fRenderTarget); }
+ Clear(GrRenderTarget* rt) : fRenderTarget(rt, GrGpuResourceRef::kWrite_IOType) {}
+ ~Clear() { }
+ GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
+
+ SkIRect fRect;
+ GrColor fColor;
+ bool fCanIgnoreRect;
- SkIRect fRect;
- GrColor fColor;
- bool fCanIgnoreRect;
- GrRenderTarget* fRenderTarget;
+ private:
+ GrPendingIOResource<GrRenderTarget> fRenderTarget;
};
struct CopySurface : public ::SkNoncopyable {
- SkAutoTUnref<GrSurface> fDst;
- SkAutoTUnref<GrSurface> fSrc;
- SkIRect fSrcRect;
- SkIPoint fDstPoint;
+ CopySurface(GrSurface* dst, GrSurface* src)
+ : fDst(dst, GrGpuResourceRef::kWrite_IOType)
+ , fSrc(src, GrGpuResourceRef::kRead_IOType) {}
+
+ GrSurface* dst() const { return fDst.get(); }
+ GrSurface* src() const { return fSrc.get(); }
+
+ SkIPoint fDstPoint;
+ SkIRect fSrcRect;
+
+ private:
+ GrPendingIOResource<GrSurface> fDst;
+ GrPendingIOResource<GrSurface> fSrc;
};
struct Clip : public ::SkNoncopyable {
// these functions record a command
void recordState();
void recordClip();
- DrawRecord* recordDraw(const DrawInfo&);
- StencilPath* recordStencilPath();
- DrawPath* recordDrawPath();
- DrawPaths* recordDrawPaths();
- Clear* recordClear();
- CopySurface* recordCopySurface();
+ Draw* recordDraw(const DrawInfo&, const GrVertexBuffer*, const GrIndexBuffer*);
+ StencilPath* recordStencilPath(const GrPath*);
+ DrawPath* recordDrawPath(const GrPath*);
+ DrawPaths* recordDrawPaths(const GrPathRange*);
+ Clear* recordClear(GrRenderTarget*);
+ CopySurface* recordCopySurface(GrSurface* dst, GrSurface* src);
+
+ virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; }
+ void addToCmdBuffer(uint8_t cmd);
// TODO: Use a single allocator for commands and records
enum {
kCopySurfacePreallocCnt = 4,
};
- typedef GrTAllocator<DrawRecord> DrawAllocator;
- typedef GrTAllocator<StencilPath> StencilPathAllocator;
- typedef GrTAllocator<DrawPath> DrawPathAllocator;
- typedef GrTAllocator<DrawPaths> DrawPathsAllocator;
- typedef GrTAllocator<GrDrawState> StateAllocator;
- typedef GrTAllocator<Clear> ClearAllocator;
- typedef GrTAllocator<CopySurface> CopySurfaceAllocator;
- typedef GrTAllocator<Clip> ClipAllocator;
-
- GrSTAllocator<kDrawPreallocCnt, DrawRecord> fDraws;
- GrSTAllocator<kStencilPathPreallocCnt, StencilPath> fStencilPaths;
- GrSTAllocator<kDrawPathPreallocCnt, DrawPath> fDrawPath;
- GrSTAllocator<kDrawPathsPreallocCnt, DrawPaths> fDrawPaths;
- GrSTAllocator<kStatePreallocCnt, GrDrawState> fStates;
- GrSTAllocator<kClearPreallocCnt, Clear> fClears;
- GrSTAllocator<kCopySurfacePreallocCnt, CopySurface> fCopySurfaces;
- GrSTAllocator<kClipPreallocCnt, Clip> fClips;
-
- SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
-
- SkSTArray<kCmdPreallocCnt, uint8_t, true> fCmds;
-
- GrDrawTarget* fDstGpu;
-
- bool fClipSet;
+ typedef GrTAllocator<Draw> DrawAllocator;
+ typedef GrTAllocator<StencilPath> StencilPathAllocator;
+ typedef GrTAllocator<DrawPath> DrawPathAllocator;
+ typedef GrTAllocator<DrawPaths> DrawPathsAllocator;
+ typedef GrTAllocator<GrDrawState> StateAllocator;
+ typedef GrTAllocator<Clear> ClearAllocator;
+ typedef GrTAllocator<CopySurface> CopySurfaceAllocator;
+ typedef GrTAllocator<Clip> ClipAllocator;
+
+ GrSTAllocator<kDrawPreallocCnt, Draw> fDraws;
+ GrSTAllocator<kStencilPathPreallocCnt, StencilPath> fStencilPaths;
+ GrSTAllocator<kDrawPathPreallocCnt, DrawPath> fDrawPath;
+ GrSTAllocator<kDrawPathsPreallocCnt, DrawPaths> fDrawPaths;
+ GrSTAllocator<kStatePreallocCnt, GrDrawState> fStates;
+ GrSTAllocator<kClearPreallocCnt, Clear> fClears;
+ GrSTAllocator<kCopySurfacePreallocCnt, CopySurface> fCopySurfaces;
+ GrSTAllocator<kClipPreallocCnt, Clip> fClips;
+
+ SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
+ SkSTArray<kCmdPreallocCnt, uint8_t, true> fCmds;
+ GrDrawTarget* fDstGpu;
+ bool fClipSet;
enum ClipProxyState {
kUnknown_ClipProxyState,
kValid_ClipProxyState,
kInvalid_ClipProxyState
};
- ClipProxyState fClipProxyState;
- SkRect fClipProxy;
-
- GrVertexBufferAllocPool& fVertexPool;
- GrIndexBufferAllocPool& fIndexPool;
+ ClipProxyState fClipProxyState;
+ SkRect fClipProxy;
+ GrVertexBufferAllocPool& fVertexPool;
+ GrIndexBufferAllocPool& fIndexPool;
struct GeometryPoolState {
- const GrVertexBuffer* fPoolVertexBuffer;
- int fPoolStartVertex;
- const GrIndexBuffer* fPoolIndexBuffer;
- int fPoolStartIndex;
+ const GrVertexBuffer* fPoolVertexBuffer;
+ int fPoolStartVertex;
+ const GrIndexBuffer* fPoolIndexBuffer;
+ int fPoolStartIndex;
// caller may conservatively over reserve vertices / indices.
// we release unused space back to allocator if possible
// can only do this if there isn't an intervening pushGeometrySource()
- size_t fUsedPoolVertexBytes;
- size_t fUsedPoolIndexBytes;
+ size_t fUsedPoolVertexBytes;
+ size_t fUsedPoolIndexBytes;
};
- SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> fGeoPoolStateStack;
- virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; }
-
- void addToCmdBuffer(uint8_t cmd);
+ typedef SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> GeoPoolStateStack;
- bool fFlushing;
- uint32_t fDrawID;
+ GeoPoolStateStack fGeoPoolStateStack;
+ bool fFlushing;
+ uint32_t fDrawID;
typedef GrDrawTarget INHERITED;
};