GrGpu* gpu() { return fGpu; }
const GrGpu* gpu() const { return fGpu; }
-private:
bool isAbandoned() const {
SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
return !SkToBool(fCache);
}
+private:
GrResourceCache* fCache;
GrGpu* fGpu;
};
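// Usage sketch (editorial, not part of the change): fGpu and fCache are set
// together and nulled together when the context is abandoned, so checking
// fCache alone suffices. Callers guard GPU work with the same pattern used by
// getIndexBuffer()/getVertexBuffer() further down:
//
//     if (this->isAbandoned()) {
//         return NULL;  // no GPU objects can be created anymore
//     }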
#include "GrBatchAtlas.h"
#include "GrPipeline.h"
-static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
-static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
-
-static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
-static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
-
GrBatchTarget::GrBatchTarget(GrGpu* gpu)
: fGpu(gpu)
+ , fVertexPool(gpu)
+ , fIndexPool(gpu)
, fFlushBuffer(kFlushBufferInitialSizeInBytes)
, fIter(fFlushBuffer)
, fNumberOfDraws(0)
, fCurrentToken(0)
, fLastFlushedToken(0)
, fInlineUpdatesIndex(0) {
-
- fVertexPool.reset(SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu,
- DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
- DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)));
- fIndexPool.reset(SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu,
- DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
- DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)));
}
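// Editorial note: with the pools now held by value and no preallocated
// buffers, an idle GrBatchTarget should pin no GPU buffers; the pools fetch
// scratch buffers from the resource cache on demand instead (see
// GrBufferAllocPool::getBuffer below).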
void GrBatchTarget::flushNext(int n) {
void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
const GrVertexBuffer** buffer, int* startVertex) {
- return fVertexPool->makeSpace(vertexSize, vertexCount, buffer, startVertex);
+ return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
const GrIndexBuffer** buffer, int* startIndex) {
- return reinterpret_cast<uint16_t*>(fIndexPool->makeSpace(indexCount, buffer, startIndex));
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}
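// Usage sketch (editorial; kStride, kCount, and the SkPoint payload are
// illustrative assumptions): a batch reserves vertex space and writes
// directly into the returned pointer.
//
//     static const size_t kStride = sizeof(SkPoint);
//     static const int kCount = 4;
//     const GrVertexBuffer* vertexBuffer;
//     int firstVertex;
//     void* verts = batchTarget->makeVertSpace(kStride, kCount,
//                                              &vertexBuffer, &firstVertex);
//     if (!verts) {
//         return;  // the pool could not allocate a buffer
//     }
//     SkPoint* positions = reinterpret_cast<SkPoint*>(verts);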
const GrIndexBuffer** buffer, int* startIndex);
// A helper for draws which overallocate and then return data to the pool
- void putBackIndices(size_t indices) { fIndexPool->putBack(indices * sizeof(uint16_t)); }
+ void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
void putBackVertices(size_t vertices, size_t vertexStride) {
- fVertexPool->putBack(vertices * vertexStride);
+ fVertexPool.putBack(vertices * vertexStride);
}
void reset() {
- fVertexPool->reset();
- fIndexPool->reset();
+ fVertexPool.reset();
+ fIndexPool.reset();
}
private:
void unmapVertexAndIndexBuffers() {
- fVertexPool->unmap();
- fIndexPool->unmap();
+ fVertexPool.unmap();
+ fIndexPool.unmap();
}
GrGpu* fGpu;
- SkAutoTDelete<GrVertexBufferAllocPool> fVertexPool;
- SkAutoTDelete<GrIndexBufferAllocPool> fIndexPool;
+ GrVertexBufferAllocPool fVertexPool;
+ GrIndexBufferAllocPool fIndexPool;
typedef void* TBufferAlign; // This wouldn't provide enough alignment if a command used long double.
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
+#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
static void VALIDATE(bool = false) {}
#endif
+static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
+static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
+
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
- size_t blockSize,
- int preallocBufferCnt)
- : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
+ size_t blockSize)
+ : fBlocks(8) {
fGpu = SkRef(gpu);
fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
fBytesInUse = 0;
-
- fPreallocBuffersInUse = 0;
- fPreallocBufferStartIdx = 0;
- for (int i = 0; i < preallocBufferCnt; ++i) {
- GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
- if (buffer) {
- *fPreallocBuffers.append() = buffer;
- }
- }
}
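// Clamp example (editorial, illustrative value): a caller passing
// blockSize = 1 << 11 ends up with
// fMinBlockSize = SkTMax((size_t)1 << 12, (size_t)1 << 11) = 4096,
// the page-size floor defined above.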
-GrBufferAllocPool::~GrBufferAllocPool() {
- VALIDATE();
+void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isMapped()) {
while (!fBlocks.empty()) {
this->destroyBlock();
}
- fPreallocBuffers.unrefAll();
+ SkASSERT(!fBufferPtr);
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+ VALIDATE();
+ this->deleteBlocks();
fGpu->unref();
}
void GrBufferAllocPool::reset() {
VALIDATE();
fBytesInUse = 0;
- if (fBlocks.count()) {
- GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isMapped()) {
- UNMAP_BUFFER(fBlocks.back());
- }
- }
- // fPreallocBuffersInUse will be decremented down to zero in the while loop
- int preallocBuffersInUse = fPreallocBuffersInUse;
- while (!fBlocks.empty()) {
- this->destroyBlock();
- }
- if (fPreallocBuffers.count()) {
- // must set this after above loop.
- fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
- preallocBuffersInUse) %
- fPreallocBuffers.count();
- }
+ this->deleteBlocks();
    // We may have created a large CPU mirror of a large VB. Reset the size
- // to match our pre-allocated VBs.
+ // to match our minimum.
fCpuData.reset(fMinBlockSize);
- SkASSERT(0 == fPreallocBuffersInUse);
VALIDATE();
}
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
- size_t pad = GrSizeAlignUpPad(usedBytes,
- alignment);
+ size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
if ((size + pad) <= back.fBytesFree) {
memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
usedBytes += pad;
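            // Padding example (editorial, illustrative values): with
            // usedBytes = 100 and alignment = 16, GrSizeAlignUpPad(100, 16)
            // == 12, so 12 zeroed pad bytes are written and the new
            // allocation starts at offset 112. Zeroing keeps the mapped
            // range free of uninitialized gaps.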
void GrBufferAllocPool::putBack(size_t bytes) {
VALIDATE();
- // if the putBack unwinds all the preallocated buffers then we will
- // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
- // will be decremented. I will reach zero if all blocks using preallocated
- // buffers are released.
- int preallocBuffersInUse = fPreallocBuffersInUse;
-
while (bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
break;
}
}
- if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
- fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
- preallocBuffersInUse) %
- fPreallocBuffers.count();
- }
+
VALIDATE();
}
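// Sketch of the overallocate-then-return pattern this enables (editorial;
// the counts are illustrative):
//
//     int maxIndices = 64;    // worst-case estimate reserved up front
//     int usedIndices = 48;   // what the draw actually emitted
//     batchTarget->putBackIndices(maxIndices - usedIndices);
//     // internally: fIndexPool.putBack(16 * sizeof(uint16_t))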
BufferBlock& block = fBlocks.push_back();
- if (size == fMinBlockSize &&
- fPreallocBuffersInUse < fPreallocBuffers.count()) {
-
- uint32_t nextBuffer = (fPreallocBuffersInUse +
- fPreallocBufferStartIdx) %
- fPreallocBuffers.count();
- block.fBuffer = fPreallocBuffers[nextBuffer];
- block.fBuffer->ref();
- ++fPreallocBuffersInUse;
- } else {
- block.fBuffer = this->createBuffer(size);
- if (NULL == block.fBuffer) {
- fBlocks.pop_back();
- return false;
- }
+ block.fBuffer = this->getBuffer(size);
+ if (NULL == block.fBuffer) {
+ fBlocks.pop_back();
+ return false;
}
- block.fBytesFree = size;
+ block.fBytesFree = block.fBuffer->gpuMemorySize();
if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
// threshold.
bool attemptMap = block.fBuffer->isCPUBacked();
if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
- attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
+ attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD;
}
if (attemptMap) {
}
if (NULL == fBufferPtr) {
- fBufferPtr = fCpuData.reset(size);
+ fBufferPtr = fCpuData.reset(block.fBytesFree);
}
VALIDATE(true);
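// Editorial note on the mapping decision above: CPU-backed buffers are always
// mapped; otherwise mapping is only attempted when the block exceeds
// GR_GEOM_BUFFER_MAP_THRESHOLD. Smaller blocks fall back to the fCpuData
// staging buffer and are pushed to the GPU later via flushCpuData().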
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
- if (fPreallocBuffersInUse > 0) {
- uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
- fPreallocBufferStartIdx +
- (fPreallocBuffers.count() - 1)) %
- fPreallocBuffers.count();
- if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
- --fPreallocBuffersInUse;
- }
- }
+
SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
VALIDATE(true);
}
-GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
+GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+
+ GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
+
if (kIndex_BufferType == fBufferType) {
- return fGpu->createIndexBuffer(size, true);
+        return rp->getIndexBuffer(size, /* dynamic = */ true, /* calledDuringFlush = */ true);
} else {
SkASSERT(kVertex_BufferType == fBufferType);
- return fGpu->createVertexBuffer(size, true);
+        return rp->getVertexBuffer(size, /* dynamic = */ true, /* calledDuringFlush = */ true);
}
}
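// Editorial note: calledDuringFlush is passed as true because a pool
// allocating in the middle of a flush must not be handed a scratch buffer
// that still has pending IO from earlier in the same flush; the provider then
// uses kRequireNoPendingIO_ScratchFlag rather than merely preferring it (see
// GrResourceProvider::getIndexBuffer below).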
////////////////////////////////////////////////////////////////////////////////
-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
- size_t bufferSize,
- int preallocBufferCnt)
- : GrBufferAllocPool(gpu,
- kVertex_BufferType,
- bufferSize,
- preallocBufferCnt) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
////////////////////////////////////////////////////////////////////////////////
-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
- size_t bufferSize,
- int preallocBufferCnt)
- : GrBufferAllocPool(gpu,
- kIndex_BufferType,
- bufferSize,
- preallocBufferCnt) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
* @param bufferSize The minimum size of created buffers.
* This value will be clamped to some
* reasonable minimum.
- * @param preallocBufferCnt The pool will allocate this number of
- * buffers at bufferSize and keep them until it
- * is destroyed.
*/
GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
- size_t bufferSize = 0,
- int preallocBufferCnt = 0);
+ size_t bufferSize = 0);
    virtual ~GrBufferAllocPool();
/**
* Returns a block of memory to hold data. A buffer designated to hold the
const GrGeometryBuffer** buffer,
size_t* offset);
- GrGeometryBuffer* createBuffer(size_t size);
+ GrGeometryBuffer* getBuffer(size_t size);
private:
struct BufferBlock {
bool createBlock(size_t requestSize);
void destroyBlock();
+ void deleteBlocks();
void flushCpuData(const BufferBlock& block, size_t flushSize);
#ifdef SK_DEBUG
void validate(bool unusedBlockAllowed = false) const;
size_t fBytesInUse;
GrGpu* fGpu;
- SkTDArray<GrGeometryBuffer*> fPreallocBuffers;
size_t fMinBlockSize;
BufferType fBufferType;
SkTArray<BufferBlock> fBlocks;
- int fPreallocBuffersInUse;
- // We attempt to cycle through the preallocated buffers rather than
- // always starting from the first.
- int fPreallocBufferStartIdx;
SkAutoMalloc fCpuData;
void* fBufferPtr;
};
* Constructor
*
* @param gpu The GrGpu used to create the vertex buffers.
- * @param bufferSize The minimum size of created VBs. This value
- * will be clamped to some reasonable minimum.
- * @param preallocBufferCnt The pool will allocate this number of VBs at
- * bufferSize and keep them until it is
- * destroyed.
*/
- GrVertexBufferAllocPool(GrGpu* gpu, size_t bufferSize = 0, int preallocBufferCnt = 0);
+ GrVertexBufferAllocPool(GrGpu* gpu);
/**
* Returns a block of memory to hold vertices. A buffer designated to hold
* Constructor
*
* @param gpu The GrGpu used to create the index buffers.
- * @param bufferSize The minimum size of created IBs. This value
- * will be clamped to some reasonable minimum.
- * @param preallocBufferCnt The pool will allocate this number of VBs at
- * bufferSize and keep them until it is
- * destroyed.
*/
- GrIndexBufferAllocPool(GrGpu* gpu,
- size_t bufferSize = 0,
- int preallocBufferCnt = 0);
+ GrIndexBufferAllocPool(GrGpu* gpu);
/**
* Returns a block of memory to hold indices. A buffer designated to hold
#include "GrGeometryBuffer.h"
+
class GrIndexBuffer : public GrGeometryBuffer {
public:
+ static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+
+ GrScratchKey::Builder builder(key, kType, 2);
+
+ builder[0] = SkToUInt(size);
+ builder[1] = dynamic ? 1 : 0;
+ }
+
/**
* Retrieves the maximum number of quads that could be rendered
* from the index buffer (using kTriangles_GrPrimitiveType).
}
protected:
GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
+ GrScratchKey key;
+ ComputeScratchKey(gpuMemorySize, dynamic, &key);
+ this->setScratchKey(key);
+ }
+
private:
typedef GrGeometryBuffer INHERITED;
};
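// Key-contents example (editorial, illustrative size): a 4096-byte dynamic
// index buffer gets a two-word scratch key with builder[0] = 4096 and
// builder[1] = 1; two buffers can substitute for each other in the cache only
// if both words match.
//
//     GrScratchKey key;
//     GrIndexBuffer::ComputeScratchKey(4096, true, &key);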
const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
- GrIndexBuffer* buffer = this->gpu()->createIndexBuffer(bufferSize, /* dynamic = */ false);
+    GrIndexBuffer* buffer = this->getIndexBuffer(bufferSize, /* dynamic = */ false,
+                                                 /* calledDuringFlush = */ true);
if (!buffer) {
return NULL;
}
return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}
+GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
+ bool calledDuringFlush) {
+ if (this->isAbandoned()) {
+ return NULL;
+ }
+
+ if (dynamic) {
+ // bin by pow2 with a reasonable min
+ static const uint32_t MIN_SIZE = 1 << 12;
+ size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
+
+ GrScratchKey key;
+ GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
+ uint32_t scratchFlags = 0;
+ if (calledDuringFlush) {
+ scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ } else {
+ scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ }
+ GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
+ if (resource) {
+ return static_cast<GrIndexBuffer*>(resource);
+ }
+ }
+
+ return this->gpu()->createIndexBuffer(size, dynamic);
+}
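// Binning example for the code above (editorial, illustrative sizes):
//   size = 5000 -> GrNextPow2 -> 8192 -> SkTMax(4096, 8192) -> 8192
//   size =  100 -> GrNextPow2 ->  128 -> SkTMax(4096,  128) -> 4096
// All small dynamic index requests therefore share the 4K bin and can reuse
// one another's cached buffers.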
+
+GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
+ bool calledDuringFlush) {
+ if (this->isAbandoned()) {
+ return NULL;
+ }
+
+ if (dynamic) {
+ // bin by pow2 with a reasonable min
+ static const uint32_t MIN_SIZE = 1 << 15;
+ size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
+
+ GrScratchKey key;
+ GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
+ uint32_t scratchFlags = 0;
+ if (calledDuringFlush) {
+ scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ } else {
+ scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ }
+ GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
+ if (resource) {
+ return static_cast<GrVertexBuffer*>(resource);
+ }
+ }
+
+ return this->gpu()->createVertexBuffer(size, dynamic);
+}
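// Usage sketch (editorial; the size is an illustrative assumption): fetch a
// dynamic scratch vertex buffer during a flush and drop the ref when done,
// returning it to the cache for reuse under its scratch key. A 10000-byte
// request is binned up to the 32768-byte vertex minimum
// (SkTMax(1 << 15, GrNextPow2(10000)) == 32768).
//
//     GrVertexBuffer* vb = resourceProvider->getVertexBuffer(
//             10000, /* dynamic = */ true, /* calledDuringFlush = */ true);
//     if (vb) {
//         // ... upload vertex data, issue draws ...
//         vb->unref();
//     }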
using GrTextureProvider::findAndRefResourceByUniqueKey;
using GrTextureProvider::abandon;
+ GrIndexBuffer* getIndexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
+ GrVertexBuffer* getVertexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
+
private:
const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
int patternSize,
#include "GrGeometryBuffer.h"
class GrVertexBuffer : public GrGeometryBuffer {
+public:
+ static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+
+ GrScratchKey::Builder builder(key, kType, 2);
+
+ builder[0] = SkToUInt(size);
+ builder[1] = dynamic ? 1 : 0;
+ }
+
protected:
GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
+ GrScratchKey key;
+ ComputeScratchKey(gpuMemorySize, dynamic, &key);
+ this->setScratchKey(key);
+ }
+
private:
typedef GrGeometryBuffer INHERITED;
};