#if SK_SUPPORT_GPU
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrContext.h"
#include "GrPathUtils.h"
#include "GrTest.h"
return &fGeometry;
}
- void onGenerateGeometry(GrBatchTarget* batchTarget) override {
+ void generateGeometry(Target* target) override {
QuadHelper helper;
size_t vertexStride = this->geometryProcessor()->getVertexStride();
SkASSERT(vertexStride == sizeof(Vertex));
- Vertex* verts = reinterpret_cast<Vertex*>(helper.init(batchTarget, vertexStride, 1));
+ Vertex* verts = reinterpret_cast<Vertex*>(helper.init(target, vertexStride, 1));
if (!verts) {
return;
}
verts[v].fKLM[1] = eval_line(verts[v].fPosition, fKlmEqs + 3, fSign);
verts[v].fKLM[2] = eval_line(verts[v].fPosition, fKlmEqs + 6, 1.f);
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
Geometry fGeometry;
return &fGeometry;
}
- void onGenerateGeometry(GrBatchTarget* batchTarget) override {
+ void generateGeometry(Target* target) override {
QuadHelper helper;
size_t vertexStride = this->geometryProcessor()->getVertexStride();
SkASSERT(vertexStride == sizeof(Vertex));
- Vertex* verts = reinterpret_cast<Vertex*>(helper.init(batchTarget, vertexStride, 1));
+ Vertex* verts = reinterpret_cast<Vertex*>(helper.init(target, vertexStride, 1));
if (!verts) {
return;
}
fGeometry.fBounds.fRight, fGeometry.fBounds.fBottom,
sizeof(Vertex));
fDevToUV.apply<4, sizeof(Vertex), sizeof(SkPoint)>(verts);
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
Geometry fGeometry;
#if SK_SUPPORT_GPU
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrPathUtils.h"
return &fGeometry;
}
- void onGenerateGeometry(GrBatchTarget* batchTarget) override {
+ void generateGeometry(Target* target) override {
size_t vertexStride = this->geometryProcessor()->getVertexStride();
SkASSERT(vertexStride == sizeof(SkPoint));
QuadHelper helper;
- SkPoint* verts = reinterpret_cast<SkPoint*>(helper.init(batchTarget, vertexStride, 1));
+ SkPoint* verts = reinterpret_cast<SkPoint*>(helper.init(target, vertexStride, 1));
if (!verts) {
return;
}
fGeometry.fBounds.outset(5.f, 5.f);
fGeometry.fBounds.toQuad(verts);
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
Geometry fGeometry;
'<(skia_src_path)/gpu/GrBatchAtlas.h',
'<(skia_src_path)/gpu/GrBatchFontCache.cpp',
'<(skia_src_path)/gpu/GrBatchFontCache.h',
- '<(skia_src_path)/gpu/GrBatchTarget.cpp',
- '<(skia_src_path)/gpu/GrBatchTarget.h',
+ '<(skia_src_path)/gpu/GrBatchFlushState.cpp',
+ '<(skia_src_path)/gpu/GrBatchFlushState.h',
'<(skia_src_path)/gpu/GrBatchTest.cpp',
'<(skia_src_path)/gpu/GrBatchTest.h',
'<(skia_src_path)/gpu/GrBlurUtils.cpp',
#include "GrAAConvexPathRenderer.h"
#include "GrAAConvexTessellator.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrContext.h"
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void generateGeometryLinesOnly(GrBatchTarget* batchTarget) {
+ void prepareLinesOnlyDraws(Target* target) {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, tess.numPts(),
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
+ &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t* idxs = batchTarget->makeIndexSpace(tess.numIndices(),
- &indexBuffer, &firstIndex);
+ uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
vertexBuffer, indexBuffer,
firstVertex, firstIndex,
tess.numPts(), tess.numIndices());
- batchTarget->draw(info);
+ target->draw(info);
}
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
if (this->linesOnly()) {
- this->generateGeometryLinesOnly(batchTarget);
+ this->prepareLinesOnlyDraws(target);
return;
}
#endif
SkAutoTUnref<GrGeometryProcessor> quadProcessor(
QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoords()));
- batchTarget->initDraw(quadProcessor, this->pipeline());
+ target->initDraw(quadProcessor, this->pipeline());
// TODO generate all segments for all paths and use one vertex buffer
for (int i = 0; i < instanceCount; i++) {
int firstVertex;
size_t vertexStride = quadProcessor->getVertexStride();
- QuadVertex* verts = reinterpret_cast<QuadVertex*>(batchTarget->makeVertSpace(
+ QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
vertexStride, vertexCount, &vertexBuffer, &firstVertex));
if (!verts) {
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t *idxs = batchTarget->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
const Draw& draw = draws[i];
vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
- batchTarget->draw(vertices);
+ target->draw(vertices);
firstVertex += draw.fVertexCnt;
firstIndex += draw.fIndexCnt;
}
#include "GrAADistanceFieldPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrPipelineBuilder.h"
int fInstancesToFlush;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
int instanceCount = fGeoData.count();
SkMatrix invert;
flags,
this->usesLocalCoords()));
- batchTarget->initDraw(dfProcessor, this->pipeline());
+ target->initDraw(dfProcessor, this->pipeline());
FlushInfo flushInfo;
SkASSERT(vertexStride == 2 * sizeof(SkPoint));
const GrVertexBuffer* vertexBuffer;
- void* vertices = batchTarget->makeVertSpace(vertexStride,
- kVerticesPerQuad * instanceCount,
- &vertexBuffer,
- &flushInfo.fVertexOffset);
+ void* vertices = target->makeVertexSpace(vertexStride,
+ kVerticesPerQuad * instanceCount,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
- flushInfo.fIndexBuffer.reset(batchTarget->resourceProvider()->refQuadIndexBuffer());
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
if (!vertices || !flushInfo.fIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
return;
}
SkScalar scale = desiredDimension/maxDim;
args.fPathData = SkNEW(PathData);
- if (!this->addPathToAtlas(batchTarget,
+ if (!this->addPathToAtlas(target,
dfProcessor,
this->pipeline(),
&flushInfo,
}
}
- atlas->setLastUseToken(args.fPathData->fID, batchTarget->currentToken());
+ atlas->setLastUseToken(args.fPathData->fID, target->currentToken());
// Now set vertices
intptr_t offset = reinterpret_cast<intptr_t>(vertices);
offset += i * kVerticesPerQuad * vertexStride;
SkPoint* positions = reinterpret_cast<SkPoint*>(offset);
- this->writePathVertices(batchTarget,
+ this->writePathVertices(target,
atlas,
this->pipeline(),
dfProcessor,
flushInfo.fInstancesToFlush++;
}
- this->flush(batchTarget, &flushInfo);
+ this->flush(target, &flushInfo);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
viewMatrix.mapRect(&fBounds);
}
- bool addPathToAtlas(GrBatchTarget* batchTarget,
+ bool addPathToAtlas(GrVertexBatch::Target* target,
const GrGeometryProcessor* dfProcessor,
const GrPipeline* pipeline,
FlushInfo* flushInfo,
// add to atlas
SkIPoint16 atlasLocation;
GrBatchAtlas::AtlasID id;
- bool success = atlas->addToAtlas(&id, batchTarget, width, height, dfStorage.get(),
+ bool success = atlas->addToAtlas(&id, target, width, height, dfStorage.get(),
&atlasLocation);
if (!success) {
- this->flush(batchTarget, flushInfo);
- batchTarget->initDraw(dfProcessor, pipeline);
+ this->flush(target, flushInfo);
+ target->initDraw(dfProcessor, pipeline);
- SkDEBUGCODE(success =) atlas->addToAtlas(&id, batchTarget, width, height,
+ SkDEBUGCODE(success =) atlas->addToAtlas(&id, target, width, height,
dfStorage.get(), &atlasLocation);
SkASSERT(success);
return true;
}
- void writePathVertices(GrBatchTarget* target,
+ void writePathVertices(GrDrawBatch::Target* target,
GrBatchAtlas* atlas,
const GrPipeline* pipeline,
const GrGeometryProcessor* gp,
vertexStride);
}
- void flush(GrBatchTarget* batchTarget, FlushInfo* flushInfo) {
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
GrVertices vertices;
int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
- batchTarget->draw(vertices);
+ target->draw(vertices);
flushInfo->fVertexOffset += kVerticesPerQuad * flushInfo->fInstancesToFlush;
flushInfo->fInstancesToFlush = 0;
}
#include "GrAAHairLinePathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrContext.h"
fBatch.fCoverage = fGeoData[0].fCoverage;
}
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
typedef SkTArray<SkPoint, true> PtArray;
typedef SkTArray<int, true> IntArray;
typedef SkTArray<float, true> FloatArray;
SkSTArray<1, Geometry, true> fGeoData;
};
-void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void AAHairlineBatch::onPrepareDraws(Target* target) {
// Setup the viewmatrix and localmatrix for the GrGeometryProcessor.
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
GrQuadEffect::Create(this->color(),
*geometryProcessorViewM,
kHairlineAA_GrProcessorEdgeType,
- batchTarget->caps(),
+ target->caps(),
*geometryProcessorLocalM,
this->usesLocalCoords(),
this->coverage()));
GrConicEffect::Create(this->color(),
*geometryProcessorViewM,
kHairlineAA_GrProcessorEdgeType,
- batchTarget->caps(),
+ target->caps(),
*geometryProcessorLocalM,
this->usesLocalCoords(),
this->coverage()));
// do lines first
if (lineCount) {
SkAutoTUnref<const GrIndexBuffer> linesIndexBuffer(
- ref_lines_index_buffer(batchTarget->resourceProvider()));
- batchTarget->initDraw(lineGP, this->pipeline());
+ ref_lines_index_buffer(target->resourceProvider()));
+ target->initDraw(lineGP, this->pipeline());
const GrVertexBuffer* vertexBuffer;
int firstVertex;
size_t vertexStride = lineGP->getVertexStride();
int vertexCount = kLineSegNumVertices * lineCount;
LineVertex* verts = reinterpret_cast<LineVertex*>(
- batchTarget->makeVertSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+ target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
if (!verts|| !linesIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
vertices.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, linesIndexBuffer,
firstVertex, kLineSegNumVertices, kIdxsPerLineSeg, lineCount,
kLineSegsNumInIdxBuffer);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
}
int firstVertex;
SkAutoTUnref<const GrIndexBuffer> quadsIndexBuffer(
- ref_quads_index_buffer(batchTarget->resourceProvider()));
+ ref_quads_index_buffer(target->resourceProvider()));
size_t vertexStride = sizeof(BezierVertex);
int vertexCount = kQuadNumVertices * quadCount + kQuadNumVertices * conicCount;
- void *vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void *vertices = target->makeVertexSpace(vertexStride, vertexCount,
+ &vertexBuffer, &firstVertex);
if (!vertices || !quadsIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
}
if (quadCount > 0) {
- batchTarget->initDraw(quadGP, this->pipeline());
+ target->initDraw(quadGP, this->pipeline());
{
GrVertices verts;
verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
firstVertex, kQuadNumVertices, kIdxsPerQuad, quadCount,
kQuadsNumInIdxBuffer);
- batchTarget->draw(verts);
+ target->draw(verts);
firstVertex += quadCount * kQuadNumVertices;
}
}
if (conicCount > 0) {
- batchTarget->initDraw(conicGP, this->pipeline());
+ target->initDraw(conicGP, this->pipeline());
{
GrVertices verts;
verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
firstVertex, kQuadNumVertices, kIdxsPerQuad, conicCount,
kQuadsNumInIdxBuffer);
- batchTarget->draw(verts);
+ target->draw(verts);
}
}
}
#include "GrAALinearizingConvexPathRenderer.h"
#include "GrAAConvexTessellator.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void draw(GrBatchTarget* batchTarget, const GrPipeline* pipeline, int vertexCount,
+ void draw(GrVertexBatch::Target* target, const GrPipeline* pipeline, int vertexCount,
size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) {
if (vertexCount == 0 || indexCount == 0) {
return;
const GrVertexBuffer* vertexBuffer;
GrVertices info;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, vertexCount, &vertexBuffer,
- &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+ &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t* idxs = batchTarget->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
memcpy(idxs, indices, indexCount * sizeof(uint16_t));
info.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, firstVertex,
firstIndex, vertexCount, indexCount);
- batchTarget->draw(info);
+ target->draw(info);
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
if (indexCount + currentIndices > UINT16_MAX) {
// if we added the current instance, we would overflow the indices we can store in a
// uint16_t. Draw what we've got so far and reset.
- draw(batchTarget, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
- indices);
+ draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+ indices);
vertexCount = 0;
indexCount = 0;
}
vertexCount += currentVertices;
indexCount += currentIndices;
}
- draw(batchTarget, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+ draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
indices);
free(vertices);
free(indices);
#include "GrAtlasTextContext.h"
#include "GrBatchFontCache.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBlurUtils.h"
#include "GrDefaultGeoProcFactory.h"
int fVertexOffset;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// if we have RGB, then we won't have any SkShaders so no need to use a localmatrix.
// TODO actually only invert if we don't have RGBA
SkMatrix localMatrix;
get_vertex_stride_df(maskFormat, isLCD) :
get_vertex_stride(maskFormat)));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int glyphCount = this->numGlyphs();
const GrVertexBuffer* vertexBuffer;
- void* vertices = batchTarget->makeVertSpace(vertexStride,
- glyphCount * kVerticesPerGlyph,
- &vertexBuffer,
- &flushInfo.fVertexOffset);
+ void* vertices = target->makeVertexSpace(vertexStride,
+ glyphCount * kVerticesPerGlyph,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
- flushInfo.fIndexBuffer.reset(batchTarget->resourceProvider()->refQuadIndexBuffer());
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
if (!vertices || !flushInfo.fVertexBuffer) {
SkDebugf("Could not allocate vertices\n");
return;
//SkASSERT(glyph->fMaskFormat == this->maskFormat());
if (!fFontCache->hasGlyph(glyph) &&
- !strike->addGlyphToAtlas(batchTarget, glyph, scaler, skGlyph,
- maskFormat)) {
- this->flush(batchTarget, &flushInfo);
- batchTarget->initDraw(gp, this->pipeline());
+ !strike->addGlyphToAtlas(target, glyph, scaler, skGlyph, maskFormat)) {
+ this->flush(target, &flushInfo);
+ target->initDraw(gp, this->pipeline());
brokenRun = glyphIdx > 0;
- SkDEBUGCODE(bool success =) strike->addGlyphToAtlas(batchTarget,
+ SkDEBUGCODE(bool success =) strike->addGlyphToAtlas(target,
glyph,
scaler,
skGlyph,
SkASSERT(success);
}
fFontCache->addGlyphToBulkAndSetUseToken(&info.fBulkUseToken, glyph,
- batchTarget->currentToken());
+ target->currentToken());
// Texture coords are the last vertex attribute so we get a pointer to the
// first one and then map with stride in regenerateTextureCoords
// set use tokens for all of the glyphs in our subrun. This is only valid if we
// have a valid atlas generation
- fFontCache->setUseTokenBulk(info.fBulkUseToken,
- batchTarget->currentToken(),
- maskFormat);
+ fFontCache->setUseTokenBulk(info.fBulkUseToken, target->currentToken(), maskFormat);
}
// now copy all vertices
if (cache) {
SkGlyphCache::AttachCache(cache);
}
- this->flush(batchTarget, &flushInfo);
+ this->flush(target, &flushInfo);
}
// to avoid even the initial copy of the struct, we have a getter for the first item which
}
}
- void flush(GrBatchTarget* batchTarget, FlushInfo* flushInfo) {
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
GrVertices vertices;
int maxGlyphsPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset,
kVerticesPerGlyph, kIndicesPerGlyph, flushInfo->fGlyphsToFlush,
maxGlyphsPerDraw);
- batchTarget->draw(vertices);
+ target->draw(vertices);
flushInfo->fVertexOffset += kVerticesPerGlyph * flushInfo->fGlyphsToFlush;
flushInfo->fGlyphsToFlush = 0;
}
*/
#include "GrBatchAtlas.h"
-#include "GrBatchTarget.h"
-#include "GrGpu.h"
+#include "GrBatchFlushState.h"
#include "GrRectanizer.h"
#include "GrTracing.h"
#include "GrVertexBuffer.h"
class BatchPlot : public SkRefCnt {
public:
- typedef GrBatchAtlas::BatchToken BatchToken;
-
SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
// index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a
// to issue a new upload even if we update the cpu backing store. We use lastref to determine
// when we can evict a plot from the cache, ie if the last ref has already flushed through
// the gpu then we can reuse the plot
- BatchToken lastUploadToken() const { return fLastUpload; }
- BatchToken lastUseToken() const { return fLastUse; }
- void setLastUploadToken(BatchToken batchToken) {
+ GrBatchToken lastUploadToken() const { return fLastUpload; }
+ GrBatchToken lastUseToken() const { return fLastUse; }
+ void setLastUploadToken(GrBatchToken batchToken) {
SkASSERT(batchToken >= fLastUpload);
fLastUpload = batchToken;
}
- void setLastUseToken(BatchToken batchToken) {
+ void setLastUseToken(GrBatchToken batchToken) {
SkASSERT(batchToken >= fLastUse);
fLastUse = batchToken;
}
- void uploadToTexture(GrBatchTarget::TextureUploader uploader) {
+ void uploadToTexture(GrBatchUploader::TextureUploader* uploader) {
// We should only be issuing uploads if we are in fact dirty
SkASSERT(fDirty && fData && fTexture);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
const unsigned char* dataPtr = fData;
dataPtr += rowBytes * fDirtyRect.fTop;
dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
- uploader.writeTexturePixels(fTexture,
- fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
- fDirtyRect.width(), fDirtyRect.height(),
- fTexture->config(), dataPtr, rowBytes);
+ uploader->writeTexturePixels(fTexture,
+ fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(),
+ fTexture->config(), dataPtr, rowBytes);
fDirtyRect.setEmpty();
SkDEBUGCODE(fDirty = false;)
}
fTexture = texture;
}
- BatchToken fLastUpload;
- BatchToken fLastUse;
+ GrBatchToken fLastUpload;
+ GrBatchToken fLastUse;
uint32_t fIndex;
uint64_t fGenID;
////////////////////////////////////////////////////////////////////////////////
-class GrPlotUploader : public GrBatchTarget::Uploader {
+class GrPlotUploader : public GrBatchUploader {
public:
GrPlotUploader(BatchPlot* plot)
: INHERITED(plot->lastUploadToken())
SkASSERT(plot);
}
- void upload(GrBatchTarget::TextureUploader uploader) override {
+ void upload(TextureUploader* uploader) override {
fPlot->uploadToTexture(uploader);
}
private:
SkAutoTUnref<BatchPlot> fPlot;
- typedef GrBatchTarget::Uploader INHERITED;
+ typedef GrBatchUploader INHERITED;
};
///////////////////////////////////////////////////////////////////////////////
fPlotList.addToHead(plot);
}
-inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, BatchPlot* plot) {
+inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) {
this->makeMRU(plot);
// If our most recent upload has already occurred then we have to insert a new
// upload. Otherwise, we already have a scheduled upload that hasn't yet ocurred.
// This new update will piggy back on that previously scheduled update.
- if (batchTarget->isIssued(plot->lastUploadToken())) {
- plot->setLastUploadToken(batchTarget->asapToken());
+ if (target->hasTokenBeenFlushed(plot->lastUploadToken())) {
+ plot->setLastUploadToken(target->asapToken());
SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)));
- batchTarget->upload(uploader);
+ target->upload(uploader);
}
*id = plot->id();
}
-bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget,
+bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
int width, int height, const void* image, SkIPoint16* loc) {
// We should already have a texture, TODO clean this up
SkASSERT(fTexture &&
plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
plot = plotIter.get();
SkASSERT(plot);
- if (batchTarget->isIssued(plot->lastUseToken())) {
+ if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) {
this->processEviction(plot->id());
plot->resetRects();
SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
return fPlotArray[index]->genID() == GetGenerationFromID(id);
}
-void GrBatchAtlas::setLastUseToken(AtlasID id, BatchToken batchToken) {
+void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) {
SkASSERT(this->hasID(id));
uint32_t index = GetIndexFromID(id);
SkASSERT(index < fNumPlotsX * fNumPlotsY);
fPlotArray[index]->setLastUseToken(batchToken);
}
-void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, BatchToken batchToken) {
+void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater,
+ GrBatchToken batchToken) {
int count = updater.fPlotsToUpdate.count();
for (int i = 0; i < count; i++) {
BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
#define GrBatchAtlas_DEFINED
#include "GrTexture.h"
+#include "batches/GrDrawBatch.h"
#include "SkPoint.h"
#include "SkTDArray.h"
#include "SkTInternalLList.h"
class BatchPlot;
-class GrBatchTarget;
class GrRectanizer;
typedef SkTInternalLList<BatchPlot> GrBatchPlotList;
class GrBatchAtlas {
public:
- typedef uint64_t BatchToken;
// An AtlasID is an opaque handle which callers can use to determine if the atlas contains
// a specific piece of data
typedef uint64_t AtlasID;
// NOTE: If the client intends to refer to the atlas, they should immediately call 'setUseToken'
// with the currentToken from the batch target, otherwise the next call to addToAtlas might
// cause an eviction
- bool addToAtlas(AtlasID*, GrBatchTarget*, int width, int height, const void* image,
+ bool addToAtlas(AtlasID*, GrDrawBatch::Target*, int width, int height, const void* image,
SkIPoint16* loc);
GrTexture* getTexture() const { return fTexture; }
bool hasID(AtlasID id);
// To ensure the atlas does not evict a given entry, the client must set the last use token
- void setLastUseToken(AtlasID id, BatchToken batchToken);
+ void setLastUseToken(AtlasID id, GrBatchToken batchToken);
void registerEvictionCallback(EvictionFunc func, void* userData) {
EvictionData* data = fEvictionCallbacks.append();
data->fFunc = func;
friend class GrBatchAtlas;
};
- void setLastUseTokenBulk(const BulkUseTokenUpdater& reffer, BatchToken);
+ void setLastUseTokenBulk(const BulkUseTokenUpdater& reffer, GrBatchToken);
static const int kGlyphMaxDim = 256;
static bool GlyphTooLargeForAtlas(int width, int height) {
return (id >> 16) & 0xffffffffffff;
}
- inline void updatePlot(GrBatchTarget*, AtlasID*, BatchPlot*);
+ inline void updatePlot(GrDrawBatch::Target*, AtlasID*, BatchPlot*);
inline void makeMRU(BatchPlot* plot);
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchFlushState.h"
+
+#include "GrBatchAtlas.h"
+#include "GrPipeline.h"
+
+// Both token counters start at the previous flush's last token, so any token
+// issued during this flush compares greater than everything already flushed.
+GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider,
+ GrBatchToken lastFlushedToken)
+ : fGpu(gpu)
+ , fUploader(gpu)
+ , fResourceProvider(resourceProvider)
+ , fVertexPool(gpu)
+ , fIndexPool(gpu)
+ , fCurrentToken(lastFlushedToken)
+ , fLastFlushedToken(lastFlushedToken) {}
+
+// Sub-allocates vertexCount vertices of vertexSize bytes from the shared vertex
+// pool; reports the backing buffer and the first vertex index via out-params.
+void* GrBatchFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex) {
+ return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+}
+
+// Sub-allocates indexCount indices from the shared index pool. The pool hands
+// back raw storage; indices are 16-bit, hence the cast.
+uint16_t* GrBatchFlushState::makeIndexSpace(int indexCount,
+ const GrIndexBuffer** buffer, int* startIndex) {
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+}
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchBuffer_DEFINED
+#define GrBatchBuffer_DEFINED
+
+#include "GrBufferAllocPool.h"
+#include "batches/GrVertexBatch.h"
+
+class GrResourceProvider;
+
+/** Simple class that performs the upload on behalf of a GrBatchUploader. */
+class GrBatchUploader::TextureUploader {
+public:
+ TextureUploader(GrGpu* gpu) : fGpu(gpu) { SkASSERT(gpu); }
+
+ /**
+ * Updates the pixels in a rectangle of a texture.
+ *
+ * @param texture the texture to write into
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param buffer memory to read pixels from
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ * @return the success/failure result forwarded from GrGpu::writePixels.
+ */
+ bool writeTexturePixels(GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ return fGpu->writePixels(texture, left, top, width, height, config, buffer, rowBytes);
+ }
+
+private:
+ GrGpu* fGpu; // not owned; presumably outlives this uploader — confirm at call sites
+};
+
+/** Tracks the state across all the GrBatches in a GrDrawTarget flush. */
+class GrBatchFlushState {
+public:
+ GrBatchFlushState(GrGpu*, GrResourceProvider*, GrBatchToken lastFlushedToken);
+
+ // By destruction time every token issued during the flush must have been
+ // flushed to the backend.
+ ~GrBatchFlushState() { SkASSERT(fLastFlushedToken == fCurrentToken); }
+
+ /** Advances the token handed out to subsequently prepared draws/uploads. */
+ void advanceToken() { ++fCurrentToken; }
+
+ /** Marks one more token's worth of draws as flushed to the backend API. */
+ void advanceLastFlushedToken() { ++fLastFlushedToken; }
+
+ /** Inserts an upload to be executed after all batches in the flush have prepared their
+ draws but before the draws are executed to the backend 3D API. */
+ void addASAPUpload(GrBatchUploader* upload) {
+ fAsapUploads.push_back().reset(SkRef(upload));
+ }
+
+ const GrCaps& caps() const { return *fGpu->caps(); }
+ GrResourceProvider* resourceProvider() const { return fResourceProvider; }
+
+ /** Has the token been flushed to the backend 3D API. */
+ bool hasTokenBeenFlushed(GrBatchToken token) const { return fLastFlushedToken >= token; }
+
+ /** The current token advances once for every contiguous set of uninterrupted draws prepared
+ by a batch. */
+ GrBatchToken currentToken() const { return fCurrentToken; }
+
+ /** The last token flushed all the way to the backend API. */
+ GrBatchToken lastFlushedToken() const { return fLastFlushedToken; }
+
+ /** This is a magic token that can be used to indicate that an upload should occur before
+ any draws for any batch in the current flush execute. */
+ GrBatchToken asapToken() const { return fLastFlushedToken + 1; }
+
+ /** Sub-allocation from the shared vertex/index pools; see the .cpp for details. */
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex);
+ uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex);
+
+ /** This is called after each batch has a chance to prepare its draws and before the draws
+ are issued. Unmaps the pools and performs all ASAP uploads. */
+ void preIssueDraws() {
+ fVertexPool.unmap();
+ fIndexPool.unmap();
+ int uploadCount = fAsapUploads.count();
+ for (int i = 0; i < uploadCount; i++) {
+ fAsapUploads[i]->upload(&fUploader);
+ }
+ fAsapUploads.reset();
+ }
+
+ /** Returns over-allocated index/vertex space to the pools. */
+ void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
+
+ void putBackVertexSpace(size_t sizeInBytes) { fVertexPool.putBack(sizeInBytes); }
+
+ GrBatchUploader::TextureUploader* uploader() { return &fUploader; }
+
+ GrGpu* gpu() { return fGpu; }
+
+private:
+ GrGpu* fGpu; // not owned
+ GrBatchUploader::TextureUploader fUploader; // performs the texture writes in preIssueDraws()
+
+ GrResourceProvider* fResourceProvider; // not owned
+
+ // CPU-side staging pools shared by every batch in the flush.
+ GrVertexBufferAllocPool fVertexPool;
+ GrIndexBufferAllocPool fIndexPool;
+
+ // Uploads queued via addASAPUpload(); drained (and reset) by preIssueDraws().
+ SkTArray<SkAutoTUnref<GrBatchUploader>, true> fAsapUploads;
+
+ GrBatchToken fCurrentToken;
+
+ GrBatchToken fLastFlushedToken;
+};
+
+/**
+ * GrDrawBatch instances use this object to allocate space for their geometry and to issue the draws
+ * that render their batch.
+ */
+class GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrDrawBatch* batch) : fState(state), fBatch(batch) {}
+
+ /** Schedules an upload. One tagged with the ASAP token runs before any draws in the
+ flush execute; any other upload is kept inline with this batch's own draws. */
+ void upload(GrBatchUploader* upload) {
+ if (this->asapToken() == upload->lastUploadToken()) {
+ fState->addASAPUpload(upload);
+ } else {
+ fBatch->fInlineUploads.push_back().reset(SkRef(upload));
+ }
+ }
+
+ // Token queries, all forwarded to the shared flush state.
+ bool hasTokenBeenFlushed(GrBatchToken token) const {
+ return fState->hasTokenBeenFlushed(token);
+ }
+ GrBatchToken currentToken() const { return fState->currentToken(); }
+ GrBatchToken asapToken() const { return fState->asapToken(); }
+
+ const GrCaps& caps() const { return fState->caps(); }
+
+ GrResourceProvider* resourceProvider() const { return fState->resourceProvider(); }
+
+protected:
+ GrDrawBatch* batch() { return fBatch; }
+ GrBatchFlushState* state() { return fState; }
+
+private:
+ GrBatchFlushState* fState; // not owned
+ GrDrawBatch* fBatch; // not owned
+};
+
+/** Extension of GrDrawBatch::Target for use by GrVertexBatch. Adds the ability to create vertex
+ draws. */
+class GrVertexBatch::Target : public GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrVertexBatch* batch) : INHERITED(state, batch) {}
+
+ /** Begins a new draw array on the batch for the given processor/pipeline and advances
+ the flush state's token, so later draws/uploads order after earlier ones. */
+ void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
+ GrVertexBatch::DrawArray* draws = this->vertexBatch()->fDrawArrays.addToTail();
+ draws->fPrimitiveProcessor.reset(primProc);
+ this->state()->advanceToken();
+ }
+
+ /** Records a draw against the draw array most recently started by initDraw(). */
+ void draw(const GrVertices& vertices) {
+ this->vertexBatch()->fDrawArrays.tail()->fDraws.push_back(vertices);
+ }
+
+ // Geometry allocation, forwarded to the flush state's pools.
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex) {
+ return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
+ }
+
+ uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex) {
+ return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
+ }
+
+ /** Helpers for batches which over-allocate and then return data to the pool. */
+ void putBackIndices(int indices) { this->state()->putBackIndices(indices); }
+ void putBackVertices(int vertices, size_t vertexStride) {
+ this->state()->putBackVertexSpace(vertices * vertexStride);
+ }
+
+private:
+ GrVertexBatch* vertexBatch() { return static_cast<GrVertexBatch*>(this->batch()); }
+ typedef GrDrawBatch::Target INHERITED;
+};
+
+#endif
}
}
-bool GrBatchTextStrike::addGlyphToAtlas(GrBatchTarget* batchTarget, GrGlyph* glyph,
+bool GrBatchTextStrike::addGlyphToAtlas(GrDrawBatch::Target* target, GrGlyph* glyph,
GrFontScaler* scaler, const SkGlyph& skGlyph,
GrMaskFormat expectedMaskFormat) {
SkASSERT(glyph);
}
}
- bool success = fBatchFontCache->addToAtlas(this, &glyph->fID, batchTarget, expectedMaskFormat,
+ bool success = fBatchFontCache->addToAtlas(this, &glyph->fID, target, expectedMaskFormat,
glyph->width(), glyph->height(),
storage.get(), &glyph->fAtlasLocation);
if (success) {
#include "SkVarAlloc.h"
class GrBatchFontCache;
-class GrBatchTarget;
class GrGpu;
/**
// happen.
// TODO we can handle some of these cases if we really want to, but the long term solution is to
// get the actual glyph image itself when we get the glyph metrics.
- bool addGlyphToAtlas(GrBatchTarget*, GrGlyph*, GrFontScaler*, const SkGlyph&,
+ bool addGlyphToAtlas(GrDrawBatch::Target*, GrGlyph*, GrFontScaler*, const SkGlyph&,
GrMaskFormat expectedMaskFormat);
// testing
}
// To ensure the GrBatchAtlas does not evict the Glyph Mask from its texture backing store,
- // the client must pass in the currentToken from the GrBatchTarget along with the GrGlyph.
+ // the client must pass in the current batch token along with the GrGlyph.
// A BulkUseTokenUpdater is used to manage bulk last use token updating in the Atlas.
// For convenience, this function will also set the use token for the current glyph if required
// NOTE: the bulk uploader is only valid if the subrun has a valid atlasGeneration
void addGlyphToBulkAndSetUseToken(GrBatchAtlas::BulkUseTokenUpdater* updater,
- GrGlyph* glyph, GrBatchAtlas::BatchToken token) {
+ GrGlyph* glyph, GrBatchToken token) {
SkASSERT(glyph);
updater->add(glyph->fID);
this->getAtlas(glyph->fMaskFormat)->setLastUseToken(glyph->fID, token);
}
void setUseTokenBulk(const GrBatchAtlas::BulkUseTokenUpdater& updater,
- GrBatchAtlas::BatchToken token,
+ GrBatchToken token,
GrMaskFormat format) {
this->getAtlas(format)->setLastUseTokenBulk(updater, token);
}
// add to texture atlas that matches this format
bool addToAtlas(GrBatchTextStrike* strike, GrBatchAtlas::AtlasID* id,
- GrBatchTarget* batchTarget,
+ GrDrawBatch::Target* target,
GrMaskFormat format, int width, int height, const void* image,
SkIPoint16* loc) {
fPreserveStrike = strike;
- return this->getAtlas(format)->addToAtlas(id, batchTarget, width, height, image, loc);
+ return this->getAtlas(format)->addToAtlas(id, target, width, height, image, loc);
}
// Some clients may wish to verify the integrity of the texture backing store of the
+++ /dev/null
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrBatchTarget.h"
-
-#include "GrBatchAtlas.h"
-#include "GrPipeline.h"
-
-GrBatchTarget::GrBatchTarget(GrGpu* gpu)
- : fGpu(gpu)
- , fVertexPool(gpu)
- , fIndexPool(gpu)
- , fFlushBuffer(kFlushBufferInitialSizeInBytes)
- , fIter(fFlushBuffer)
- , fNumberOfDraws(0)
- , fCurrentToken(0)
- , fLastFlushedToken(0)
- , fInlineUpdatesIndex(0) {
-}
-
-void GrBatchTarget::flushNext(int n) {
- for (; n > 0; n--) {
- fLastFlushedToken++;
- SkDEBUGCODE(bool verify =) fIter.next();
- SkASSERT(verify);
-
- BufferedFlush* bf = fIter.get();
-
- // Flush all texture uploads
- int uploadCount = fInlineUploads.count();
- while (fInlineUpdatesIndex < uploadCount &&
- fInlineUploads[fInlineUpdatesIndex]->lastUploadToken() <= fLastFlushedToken) {
- fInlineUploads[fInlineUpdatesIndex++]->upload(TextureUploader(fGpu));
- }
-
- GrProgramDesc desc;
- const GrPipeline* pipeline = bf->fPipeline;
- const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
- fGpu->buildProgramDesc(&desc, *primProc, *pipeline, bf->fBatchTracker);
-
- GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
-
- int drawCount = bf->fVertexDraws.count();
- const SkSTArray<1, GrVertices, true>& vertexDraws = bf->fVertexDraws;
- for (int i = 0; i < drawCount; i++) {
- fGpu->draw(args, vertexDraws[i]);
- }
- }
-}
-
-void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex) {
- return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
-}
-
-uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
- const GrIndexBuffer** buffer, int* startIndex) {
- return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
-}
-
+++ /dev/null
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrBatchBuffer_DEFINED
-#define GrBatchBuffer_DEFINED
-
-#include "GrBatchAtlas.h"
-#include "GrBufferAllocPool.h"
-#include "GrContext.h"
-#include "GrPendingProgramElement.h"
-#include "GrPipeline.h"
-#include "GrTRecorder.h"
-#include "GrVertices.h"
-
-/*
- * GrBatch instances use this object to allocate space for their geometry and to issue the draws
- * that render their batch.
- */
-class GrBatchTarget : public SkNoncopyable {
-public:
- typedef GrBatchAtlas::BatchToken BatchToken;
- GrBatchTarget(GrGpu* gpu);
-
- void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
- GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
- fNumberOfDraws++;
- fCurrentToken++;
- }
-
- class TextureUploader {
- public:
- TextureUploader(GrGpu* gpu) : fGpu(gpu) { SkASSERT(gpu); }
-
- /**
- * Updates the pixels in a rectangle of a texture.
- *
- * @param left left edge of the rectangle to write (inclusive)
- * @param top top edge of the rectangle to write (inclusive)
- * @param width width of rectangle to write in pixels.
- * @param height height of rectangle to write in pixels.
- * @param config the pixel config of the source buffer
- * @param buffer memory to read pixels from
- * @param rowBytes number of bytes between consecutive rows. Zero
- * means rows are tightly packed.
- */
- bool writeTexturePixels(GrTexture* texture,
- int left, int top, int width, int height,
- GrPixelConfig config, const void* buffer,
- size_t rowBytes) {
- return fGpu->writePixels(texture, left, top, width, height, config, buffer, rowBytes);
- }
-
- private:
- GrGpu* fGpu;
- };
-
- class Uploader : public SkRefCnt {
- public:
- Uploader(BatchToken lastUploadToken) : fLastUploadToken(lastUploadToken) {}
- BatchToken lastUploadToken() const { return fLastUploadToken; }
- virtual void upload(TextureUploader)=0;
-
- private:
- BatchToken fLastUploadToken;
- };
-
- void upload(Uploader* upload) {
- if (this->asapToken() == upload->lastUploadToken()) {
- fAsapUploads.push_back().reset(SkRef(upload));
- } else {
- fInlineUploads.push_back().reset(SkRef(upload));
- }
- }
-
- void draw(const GrVertices& vertices) {
- fFlushBuffer.back().fVertexDraws.push_back(vertices);
- }
-
- bool isIssued(BatchToken token) const { return fLastFlushedToken >= token; }
- BatchToken currentToken() const { return fCurrentToken; }
- BatchToken asapToken() const { return fLastFlushedToken + 1; }
-
- // TODO much of this complexity goes away when batch is everywhere
- void resetNumberOfDraws() { fNumberOfDraws = 0; }
- int numberOfDraws() const { return fNumberOfDraws; }
- void preFlush() {
- this->unmapVertexAndIndexBuffers();
- int updateCount = fAsapUploads.count();
- for (int i = 0; i < updateCount; i++) {
- fAsapUploads[i]->upload(TextureUploader(fGpu));
- }
- fInlineUpdatesIndex = 0;
- fIter = FlushBuffer::Iter(fFlushBuffer);
- }
- void flushNext(int n);
- void postFlush() {
- SkASSERT(!fIter.next());
- fFlushBuffer.reset();
- fAsapUploads.reset();
- fInlineUploads.reset();
- }
-
- const GrCaps& caps() const { return *fGpu->caps(); }
-
- GrResourceProvider* resourceProvider() const { return fGpu->getContext()->resourceProvider(); }
-
- void* makeVertSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex);
- uint16_t* makeIndexSpace(int indexCount,
- const GrIndexBuffer** buffer, int* startIndex);
-
- // A helper for draws which overallocate and then return data to the pool
- void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
-
- void putBackVertices(size_t vertices, size_t vertexStride) {
- fVertexPool.putBack(vertices * vertexStride);
- }
-
- void reset() {
- fVertexPool.reset();
- fIndexPool.reset();
- }
-
-private:
- void unmapVertexAndIndexBuffers() {
- fVertexPool.unmap();
- fIndexPool.unmap();
- }
-
- GrGpu* fGpu;
- GrVertexBufferAllocPool fVertexPool;
- GrIndexBufferAllocPool fIndexPool;
-
- typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
-
- struct BufferedFlush {
- BufferedFlush(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline)
- : fPrimitiveProcessor(primProc)
- , fPipeline(pipeline) {}
- typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
- ProgramPrimitiveProcessor fPrimitiveProcessor;
- const GrPipeline* fPipeline;
- GrBatchTracker fBatchTracker;
- SkSTArray<1, GrVertices, true> fVertexDraws;
- };
-
- enum {
- kFlushBufferInitialSizeInBytes = 8 * sizeof(BufferedFlush),
- };
-
- typedef GrTRecorder<BufferedFlush, TBufferAlign> FlushBuffer;
-
- FlushBuffer fFlushBuffer;
- // TODO this is temporary
- FlushBuffer::Iter fIter;
- int fNumberOfDraws;
- BatchToken fCurrentToken;
- BatchToken fLastFlushedToken; // The next token to be flushed
- SkTArray<SkAutoTUnref<Uploader>, true> fAsapUploads;
- SkTArray<SkAutoTUnref<Uploader>, true> fInlineUploads;
- int fInlineUpdatesIndex;
-};
-
-#endif
}
void GrBufferedDrawTarget::onFlush() {
- fCommands->flush(this);
+ fCommands->flush(this->getGpu(), this->getContext()->resourceProvider());
++fDrawID;
}
GrCommandBuilder* GrCommandBuilder::Create(GrGpu* gpu, bool reorder) {
if (reorder) {
- return SkNEW_ARGS(GrReorderCommandBuilder, (gpu));
+ return SkNEW(GrReorderCommandBuilder);
} else {
- return SkNEW_ARGS(GrInOrderCommandBuilder, (gpu));
+ return SkNEW(GrInOrderCommandBuilder);
}
}
#include "GrTargetCommands.h"
+class GrGpu;
+class GrResourceProvider;
class GrBufferedDrawTarget;
class GrCommandBuilder : ::SkNoncopyable {
virtual ~GrCommandBuilder() {}
void reset() { fCommands.reset(); }
- void flush(GrBufferedDrawTarget* bufferedDrawTarget) { fCommands.flush(bufferedDrawTarget); }
+ void flush(GrGpu* gpu, GrResourceProvider* rp) { fCommands.flush(gpu, rp); }
virtual Cmd* recordClearStencilClip(const SkIRect& rect,
bool insideClip,
typedef GrTargetCommands::ClearStencilClip ClearStencilClip;
typedef GrTargetCommands::CopySurface CopySurface;
- GrCommandBuilder(GrGpu* gpu) : fCommands(gpu) {}
+ GrCommandBuilder() {}
GrTargetCommands::CmdBuffer* cmdBuffer() { return fCommands.cmdBuffer(); }
- GrBatchTarget* batchTarget() { return fCommands.batchTarget(); }
-
private:
GrTargetCommands fCommands;
#include "GrContext.h"
#include "GrBatchFontCache.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBufferedDrawTarget.h"
#include "GrCaps.h"
#include "GrDefaultPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(SkPoint));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, maxVertices,
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, maxVertices,
+ &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
void* indices = NULL;
if (isIndexed) {
- indices = batchTarget->makeIndexSpace(maxIndices, &indexBuffer, &firstIndex);
+ indices = target->makeIndexSpace(maxIndices, &indexBuffer, &firstIndex);
if (!indices) {
SkDebugf("Could not allocate indices\n");
} else {
vertices.init(primitiveType, vertexBuffer, firstVertex, vertexOffset);
}
- batchTarget->draw(vertices);
+ target->draw(vertices);
// put back reserves
- batchTarget->putBackIndices((size_t)(maxIndices - indexOffset));
- batchTarget->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
+ target->putBackIndices((size_t)(maxIndices - indexOffset));
+ target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
GrImmediateDrawTarget::GrImmediateDrawTarget(GrContext* context)
: INHERITED(context)
- , fBatchTarget(this->getGpu())
, fDrawID(0) {
}
}
void GrImmediateDrawTarget::onDrawBatch(GrDrawBatch* batch) {
- fBatchTarget.resetNumberOfDraws();
+#if 0
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(batch);
- vertexBatch->generateGeometry(&fBatchTarget);
+ vertexBatch->prepareDraws(&fBatchTarget);
vertexBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
fBatchTarget.preFlush();
fBatchTarget.flushNext(vertexBatch->numberOfDraws());
fBatchTarget.postFlush();
+#endif
}
void GrImmediateDrawTarget::onClear(const SkIRect& rect, GrColor color,
this->getGpu()->discard(renderTarget);
}
-void GrImmediateDrawTarget::onReset() {
- fBatchTarget.reset();
-}
+void GrImmediateDrawTarget::onReset() {}
void GrImmediateDrawTarget::onFlush() {
++fDrawID;
#include "GrDrawTarget.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
/**
* A debug GrDrawTarget which immediately flushes every command it receives
bool isIssued(uint32_t drawID) override { return drawID != fDrawID; }
- GrBatchTarget fBatchTarget;
uint32_t fDrawID;
typedef GrClipTarget INHERITED;
}
}
- return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch, this->batchTarget()));
+ return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch));
}
GrTargetCommands::Cmd*
typedef GrCommandBuilder::Cmd Cmd;
typedef GrCommandBuilder::State State;
- GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
+ GrInOrderCommandBuilder() : INHERITED() { }
Cmd* recordDrawBatch(GrDrawBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
#include "GrOvalRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrDrawTarget.h"
#include "GrGeometryProcessor.h"
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
return;
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(CircleVertex));
QuadHelper helper;
- CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(batchTarget, vertexStride,
+ CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(target, vertexStride,
instanceCount));
if (!verts) {
return;
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
return;
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
QuadHelper helper;
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(EllipseVertex));
EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
- helper.init(batchTarget, vertexStride, instanceCount));
+ helper.init(target, vertexStride, instanceCount));
if (!verts) {
return;
}
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// Setup geometry processor
SkAutoTUnref<GrGeometryProcessor> gp(DIEllipseEdgeEffect::Create(this->color(),
this->viewMatrix(),
this->mode(),
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(DIEllipseVertex));
QuadHelper helper;
DIEllipseVertex* verts = reinterpret_cast<DIEllipseVertex*>(
- helper.init(batchTarget, vertexStride, instanceCount));
+ helper.init(target, vertexStride, instanceCount));
if (!verts) {
return;
}
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// reset to device coordinates
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
// drop out the middle quad if we're stroked
int indicesPerInstance = this->stroke() ? kIndicesPerStrokeRRect : kIndicesPerRRect;
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- ref_rrect_index_buffer(this->stroke(), batchTarget->resourceProvider()));
+ ref_rrect_index_buffer(this->stroke(), target->resourceProvider()));
InstancedHelper helper;
- CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(batchTarget,
+ CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(target,
kTriangles_GrPrimitiveType, vertexStride, indexBuffer, kVertsPerRRect,
indicesPerInstance, instanceCount));
if (!verts || !indexBuffer) {
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// reset to device coordinates
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
// drop out the middle quad if we're stroked
int indicesPerInstance = this->stroke() ? kIndicesPerStrokeRRect : kIndicesPerRRect;
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- ref_rrect_index_buffer(this->stroke(), batchTarget->resourceProvider()));
+ ref_rrect_index_buffer(this->stroke(), target->resourceProvider()));
InstancedHelper helper;
EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
- helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride, indexBuffer,
+ helper.init(target, kTriangles_GrPrimitiveType, vertexStride, indexBuffer,
kVertsPerRRect, indicesPerInstance, instanceCount));
if (!verts || !indexBuffer) {
SkDebugf("Could not allocate vertices\n");
verts++;
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
}
#endif
- return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch, this->batchTarget()));
+ return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch));
}
typedef GrCommandBuilder::Cmd Cmd;
typedef GrCommandBuilder::State State;
- GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
+ GrReorderCommandBuilder() : INHERITED() {}
Cmd* recordDrawBatch(GrDrawBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
#include "GrTargetCommands.h"
-#include "GrBufferedDrawTarget.h"
-
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrPathRendering.h"
#include "batches/GrDrawBatch.h"
#include "batches/GrVertexBatch.h"
void GrTargetCommands::reset() {
fCmdBuffer.reset();
- fBatchTarget.reset();
}
-void GrTargetCommands::flush(GrBufferedDrawTarget* bufferedDrawTarget) {
+void GrTargetCommands::flush(GrGpu* gpu, GrResourceProvider* resourceProvider) {
GrBATCH_INFO("Flushing\n");
if (fCmdBuffer.empty()) {
return;
}
-
- GrGpu* gpu = bufferedDrawTarget->getGpu();
-
+ GrBatchFlushState flushState(gpu, resourceProvider, fLastFlushToken);
// Loop over all batches and generate geometry
CmdBuffer::Iter genIter(fCmdBuffer);
while (genIter.next()) {
if (Cmd::kDrawBatch_CmdType == genIter->type()) {
DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
- fBatchTarget.resetNumberOfDraws();
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(db->batch());
- vertexBatch->generateGeometry(&fBatchTarget);
- vertexBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
+ vertexBatch->prepareDraws(&flushState);
}
}
- fBatchTarget.preFlush();
+ flushState.preIssueDraws();
CmdBuffer::Iter iter(fCmdBuffer);
-
while (iter.next()) {
- iter->execute(gpu);
+ iter->execute(&flushState);
}
-
- fBatchTarget.postFlush();
+ fLastFlushToken = flushState.lastFlushedToken();
}
-void GrTargetCommands::StencilPath::execute(GrGpu* gpu) {
+void GrTargetCommands::StencilPath::execute(GrBatchFlushState* state) {
GrPathRendering::StencilPathArgs args(fUseHWAA, fRenderTarget.get(), &fViewMatrix, &fScissor,
&fStencil);
- gpu->pathRendering()->stencilPath(args, this->path());
+ state->gpu()->pathRendering()->stencilPath(args, this->path());
}
-void GrTargetCommands::DrawPath::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPath::execute(GrBatchFlushState* state) {
if (!fState->fCompiled) {
- gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
- fState->fBatchTracker);
+ state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+ *fState->getPipeline(), fState->fBatchTracker);
fState->fCompiled = true;
}
GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
- gpu->pathRendering()->drawPath(args, this->path());
+ state->gpu()->pathRendering()->drawPath(args, this->path());
}
-void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPaths::execute(GrBatchFlushState* state) {
if (!fState->fCompiled) {
- gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
- fState->fBatchTracker);
+ state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+ *fState->getPipeline(), fState->fBatchTracker);
fState->fCompiled = true;
}
GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
- gpu->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType, fTransforms,
- fTransformType, fCount);
+ state->gpu()->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType,
+ fTransforms, fTransformType, fCount);
}
-void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawBatch::execute(GrBatchFlushState* state) {
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
- const GrVertexBatch* vertexBatch = static_cast<const GrVertexBatch*>(fBatch.get());
- fBatchTarget->flushNext(vertexBatch->numberOfDraws());
+ GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(fBatch.get());
+ vertexBatch->issueDraws(state);
}
-void GrTargetCommands::Clear::execute(GrGpu* gpu) {
+
+void GrTargetCommands::Clear::execute(GrBatchFlushState* state) {
if (GrColor_ILLEGAL == fColor) {
- gpu->discard(this->renderTarget());
+ state->gpu()->discard(this->renderTarget());
} else {
- gpu->clear(fRect, fColor, this->renderTarget());
+ state->gpu()->clear(fRect, fColor, this->renderTarget());
}
}
-void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
- gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
+void GrTargetCommands::ClearStencilClip::execute(GrBatchFlushState* state) {
+ state->gpu()->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}
-void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
- gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
+void GrTargetCommands::CopySurface::execute(GrBatchFlushState* state) {
+ state->gpu()->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
#ifndef GrTargetCommands_DEFINED
#define GrTargetCommands_DEFINED
-#include "GrBatchTarget.h"
#include "GrDrawTarget.h"
-#include "GrGpu.h"
#include "GrPath.h"
#include "GrPendingProgramElement.h"
+#include "GrPrimitiveProcessor.h"
#include "GrRenderTarget.h"
#include "GrTRecorder.h"
-#include "SkRect.h"
-#include "SkTypes.h"
#include "batches/GrDrawBatch.h"
+#include "SkRect.h"
-class GrBufferedDrawTarget;
+class GrResourceProvider;
+class GrBatchFlushState;
// TODO: Convert all commands into GrBatch and remove this class.
class GrTargetCommands : ::SkNoncopyable {
public:
- GrTargetCommands(GrGpu* gpu)
- : fCmdBuffer(kCmdBufferInitialSizeInBytes)
- , fBatchTarget(gpu) {
- }
+ GrTargetCommands() : fCmdBuffer(kCmdBufferInitialSizeInBytes), fLastFlushToken(0) {}
class Cmd : ::SkNoncopyable {
public:
{}
virtual ~Cmd() {}
- virtual void execute(GrGpu*) = 0;
+ virtual void execute(GrBatchFlushState*) = 0;
CmdType type() const { return fType; }
};
void reset();
- void flush(GrBufferedDrawTarget*);
+ void flush(GrGpu*, GrResourceProvider*);
private:
friend class GrCommandBuilder;
const GrPath* path() const { return fPath.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkMatrix fViewMatrix;
bool fUseHWAA;
const GrPath* path() const { return fPath.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkAutoTUnref<StateForPathDraw> fState;
GrStencilSettings fStencilSettings;
const GrPathRange* pathRange() const { return fPathRange.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkAutoTUnref<StateForPathDraw> fState;
char* fIndices;
GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIRect fRect;
GrColor fColor;
GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIRect fRect;
bool fInsideClip;
GrSurface* dst() const { return fDst.get(); }
GrSurface* src() const { return fSrc.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIPoint fDstPoint;
SkIRect fSrcRect;
};
struct DrawBatch : public Cmd {
- DrawBatch(GrDrawBatch* batch, GrBatchTarget* batchTarget)
+ DrawBatch(GrDrawBatch* batch)
: Cmd(kDrawBatch_CmdType)
- , fBatch(SkRef(batch))
- , fBatchTarget(batchTarget) {
+ , fBatch(SkRef(batch)){
SkASSERT(!batch->isUsed());
}
GrDrawBatch* batch() { return fBatch; }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
private:
SkAutoTUnref<GrDrawBatch> fBatch;
- GrBatchTarget* fBatchTarget;
};
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
CmdBuffer* cmdBuffer() { return &fCmdBuffer; }
- GrBatchTarget* batchTarget() { return &fBatchTarget; }
CmdBuffer fCmdBuffer;
- GrBatchTarget fBatchTarget;
+ GrBatchToken fLastFlushToken;
};
#endif
-
#include "GrTessellatingPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrPathUtils.h"
return actualCount;
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// construct a cache key from the path's genID and the view matrix
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
}
fStroke.asUniqueKeyFragment(&builder[2 + clipBoundsSize32]);
builder.finish();
- GrResourceProvider* rp = batchTarget->resourceProvider();
+ GrResourceProvider* rp = target->resourceProvider();
SkAutoTUnref<GrVertexBuffer> vertexBuffer(rp->findAndRefTByUniqueKey<GrVertexBuffer>(key));
int actualCount;
SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
fViewMatrix));
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
GrPrimitiveType primitiveType = WIREFRAME ? kLines_GrPrimitiveType
: kTriangles_GrPrimitiveType;
GrVertices vertices;
vertices.init(primitiveType, vertexBuffer.get(), 0, actualCount);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
#include "GrAAFillRectBatch.h"
+#include "GrBatchFlushState.h"
#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrResourceKey.h"
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkAutoTUnref<const GrGeometryProcessor> gp(CreateFillRectGP(canTweakAlphaForCoverage,
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
SkASSERT(Base::StrideCheck(vertexStride, canTweakAlphaForCoverage,
this->usesLocalCoords()));
int instanceCount = fGeoData.count();
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(get_index_buffer(
- batchTarget->resourceProvider()));
+ SkAutoTUnref<const GrIndexBuffer> indexBuffer(get_index_buffer(target->resourceProvider()));
InstancedHelper helper;
- void* vertices = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
indexBuffer, kVertsPerAAFillRect, kIndicesPerAAFillRect,
instanceCount);
if (!vertices || !indexBuffer) {
fGeoData[i],
canTweakAlphaForCoverage);
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
#include "GrAAStrokeRectBatch.h"
+#include "GrBatchFlushState.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrResourceKey.h"
#include "GrResourceProvider.h"
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
-void GrAAStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrAAStrokeRectBatch::onPrepareDraws(Target* target) {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkAutoTUnref<const GrGeometryProcessor> gp(create_stroke_rect_gp(canTweakAlphaForCoverage,
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
int instanceCount = fGeoData.count();
const SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- GetIndexBuffer(batchTarget->resourceProvider(), this->miterStroke()));
+ GetIndexBuffer(target->resourceProvider(), this->miterStroke()));
InstancedHelper helper;
- void* vertices = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
indexBuffer, verticesPerInstance, indicesPerInstance,
instanceCount);
if (!vertices || !indexBuffer) {
args.fMiterStroke,
canTweakAlphaForCoverage);
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
const GrIndexBuffer* GrAAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
#include "SkMatrix.h"
#include "SkRect.h"
+class GrResourceProvider;
+
class GrAAStrokeRectBatch : public GrVertexBatch {
public:
// TODO support AA rotated stroke rects by copying around view matrices
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrAAStrokeRectBatch(const Geometry& geometry, const SkMatrix& viewMatrix) {
this->initClassID<GrAAStrokeRectBatch>();
fBatch.fViewMatrix = viewMatrix;
#include "GrBWFillRectBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrPrimitiveProcessor.h"
#include "GrVertexBatch.h"
-class GrBatchTarget;
+class GrBatchFlushState;
class SkMatrix;
struct SkRect;
fBatch.fCoverageIgnored = !init.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkAutoTUnref<const GrGeometryProcessor> gp(this->createRectGP());
if (!gp) {
SkDebugf("Could not create GrGeometryProcessor\n");
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorLocalCoordAttr) :
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
QuadHelper helper;
- void* vertices = helper.init(batchTarget, vertexStride, instanceCount);
+ void* vertices = helper.init(target, vertexStride, instanceCount);
if (!vertices) {
return;
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
*/
#include "GrDrawAtlasBatch.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "SkGr.h"
#include "SkRandom.h"
return GrDefaultGeoProcFactory::Create(gpColor, coverage, localCoords, viewMatrix);
}
-void GrDrawAtlasBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrDrawAtlasBatch::onPrepareDraws(Target* target) {
// Setup geometry processor
SkAutoTUnref<const GrGeometryProcessor> gp(set_vertex_attributes(this->hasColors(),
this->color(),
this->viewMatrix(),
this->coverageIgnored()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
QuadHelper helper;
int numQuads = this->quadCount();
- void* verts = helper.init(batchTarget, vertexStride, numQuads);
+ void* verts = helper.init(target, vertexStride, numQuads);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
memcpy(vertPtr, args.fVerts.begin(), allocSize);
vertPtr += allocSize;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
GrDrawAtlasBatch::GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& viewMatrix,
}
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& viewMatrix, int spriteCount,
const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
struct GrInitInvariantOutput;
/**
+ * GrDrawBatches are flushed in two phases (preDraw and draw). In preDraw, uploads to GrGpuResources
+ * and draws are determined and scheduled. They are issued in the draw phase. GrBatchToken is used
+ * to sequence the uploads relative to each other and to draws.
+ **/
+
+// Monotonically increasing token used to sequence inline uploads relative to draws
+// during flush (consumed in GrVertexBatch::issueDraws()).
+typedef uint64_t GrBatchToken;
+
+// A deferred upload to a GrGpuResource. Subclasses implement upload() to perform the
+// actual transfer. The stored token presumably marks the point in the draw sequence by
+// which this upload must have executed -- TODO confirm against GrBatchFlushState.
+class GrBatchUploader : public SkRefCnt {
+public:
+ class TextureUploader;
+
+ GrBatchUploader(GrBatchToken lastUploadToken) : fLastUploadToken(lastUploadToken) {}
+ GrBatchToken lastUploadToken() const { return fLastUploadToken; }
+ virtual void upload(TextureUploader*)=0;
+
+private:
+ GrBatchToken fLastUploadToken;
+};
+
+/**
* Base class for GrBatches that draw. These batches have a GrPipeline installed by GrDrawTarget.
*/
class GrDrawBatch : public GrBatch {
public:
+ class Target;
+
GrDrawBatch();
~GrDrawBatch() override;
*/
virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
- SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
- bool fPipelineInstalled;
+protected:
+ SkTArray<SkAutoTUnref<GrBatchUploader>, true> fInlineUploads;
+
+private:
+ SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
+ bool fPipelineInstalled;
typedef GrBatch INHERITED;
};
#include "GrDrawVerticesBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrInvariantOutput.h"
#include "GrDefaultGeoProcFactory.h"
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
-void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrDrawVerticesBatch::onPrepareDraws(Target* target) {
int colorOffset = -1, texOffset = -1;
SkAutoTUnref<const GrGeometryProcessor> gp(
set_vertex_attributes(this->hasLocalCoords(), this->hasColors(), &colorOffset,
&texOffset, this->color(), this->viewMatrix(),
this->coverageIgnored()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, this->vertexCount(),
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, this->vertexCount(),
+ &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
uint16_t* indices = NULL;
if (this->hasIndices()) {
- indices = batchTarget->makeIndexSpace(this->indexCount(), &indexBuffer, &firstIndex);
+ indices = target->makeIndexSpace(this->indexCount(), &indexBuffer, &firstIndex);
if (!indices) {
SkDebugf("Could not allocate indices\n");
} else {
vertices.init(this->primitiveType(), vertexBuffer, firstVertex, this->vertexCount());
}
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
#include "SkRect.h"
#include "SkTDArray.h"
-class GrBatchTarget;
+class GrBatchFlushState;
struct GrInitInvariantOutput;
class GrDrawVerticesBatch : public GrVertexBatch {
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrDrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType,
const SkMatrix& viewMatrix,
const SkPoint* positions, int vertexCount,
#include "GrStrokeRectBatch.h"
#include "GrBatchTest.h"
+#include "GrBatchFlushState.h"
#include "SkRandom.h"
GrStrokeRectBatch::GrStrokeRectBatch(const Geometry& geometry, bool snapToPixelCenters) {
verts[9] = verts[1];
}
-
-void GrStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrStrokeRectBatch::onPrepareDraws(Target* target) {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
this->viewMatrix()));
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
GrVertices vertices;
vertices.init(primType, vertexBuffer, firstVertex, vertexCount);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
#ifdef GR_TEST_UTILS
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
private:
+ void onPrepareDraws(Target*) override;
+
GrStrokeRectBatch(const Geometry& geometry, bool snapToPixelCenters);
GrColor color() const { return fBatch.fColor; }
#ifndef GrTestBatch_DEFINED
#define GrTestBatch_DEFINED
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrGeometryProcessor.h"
#include "GrVertexBuffer.h"
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
- batchTarget->initDraw(fGeometryProcessor, this->pipeline());
-
- this->onGenerateGeometry(batchTarget);
- }
-
protected:
GrTestBatch(const GrGeometryProcessor* gp, const SkRect& bounds) {
fGeometryProcessor.reset(SkRef(gp));
const GrGeometryProcessor* geometryProcessor() const { return fGeometryProcessor; }
private:
+ // Prepare-phase hook: installs the test geometry processor and pipeline on the
+ // target, then defers geometry creation to the subclass' generateGeometry().
+ void onPrepareDraws(Target* target) override {
+ target->initDraw(fGeometryProcessor, this->pipeline());
+ this->generateGeometry(target);
+ }
+
virtual Geometry* geoData(int index) = 0;
virtual const Geometry* geoData(int index) const = 0;
return false;
}
- virtual void onGenerateGeometry(GrBatchTarget* batchTarget) = 0;
+ virtual void generateGeometry(Target*) = 0;
struct BatchTracker {
GrColor fColor;
*/
#include "GrVertexBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrResourceProvider.h"
-GrVertexBatch::GrVertexBatch() : fNumberOfDraws(0) {}
+// fDrawArrays(1): SkTLList pre-allocation count -- presumably a single DrawArray
+// (one primitive processor per batch) is the common case. TODO confirm.
+GrVertexBatch::GrVertexBatch() : fDrawArrays(1) {}
-void* GrVertexBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType,
- size_t vertexStride, const GrIndexBuffer* indexBuffer,
- int verticesPerInstance, int indicesPerInstance,
- int instancesToDraw) {
- SkASSERT(batchTarget);
+// Prepare-phase entry point: wraps the flush state in a Target bound to this batch and
+// lets the subclass record its vertex data and draws via onPrepareDraws().
+void GrVertexBatch::prepareDraws(GrBatchFlushState* state) {
+ Target target(state, this);
+ this->onPrepareDraws(&target);
+}
+
+void* GrVertexBatch::InstancedHelper::init(Target* target, GrPrimitiveType primType,
+ size_t vertexStride, const GrIndexBuffer* indexBuffer,
+ int verticesPerInstance, int indicesPerInstance,
+ int instancesToDraw) {
+ SkASSERT(target);
if (!indexBuffer) {
return NULL;
}
const GrVertexBuffer* vertexBuffer;
int firstVertex;
int vertexCount = verticesPerInstance * instancesToDraw;
- void* vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void* vertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
if (!vertices) {
SkDebugf("Vertices could not be allocated for instanced rendering.");
return NULL;
return vertices;
}
-void* GrVertexBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride,
+// Hands the vertices set up by init() to the target. init() must have succeeded
+// (at least one instance allocated) before this is called.
+void GrVertexBatch::InstancedHelper::recordDraw(Target* target) {
+ SkASSERT(fVertices.instanceCount());
+ target->draw(fVertices);
+}
+
+void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
int quadsToDraw) {
SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
- batchTarget->resourceProvider()->refQuadIndexBuffer());
+ target->resourceProvider()->refQuadIndexBuffer());
if (!quadIndexBuffer) {
SkDebugf("Could not get quad index buffer.");
return NULL;
}
- return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ return this->INHERITED::init(target, kTriangles_GrPrimitiveType, vertexStride,
quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
}
+
+// Draw-phase entry point: replays the draws recorded during prepareDraws(),
+// interleaving any inline uploads whose tokens have been reached.
+void GrVertexBatch::issueDraws(GrBatchFlushState* state) {
+ int uploadCnt = fInlineUploads.count();
+ int currUpload = 0;
+
+ // Iterate over all the drawArrays. Before issuing the draws in each array, perform any inline
+ // uploads.
+ for (SkTLList<DrawArray>::Iter da(fDrawArrays); da.get(); da.next()) {
+ state->advanceLastFlushedToken();
+ while (currUpload < uploadCnt &&
+ fInlineUploads[currUpload]->lastUploadToken() <= state->lastFlushedToken()) {
+ fInlineUploads[currUpload++]->upload(state->uploader());
+ }
+ // All draws within one DrawArray share a primitive processor and this batch's
+ // pipeline, so one program descriptor covers them all.
+ const GrVertexBatch::DrawArray& drawArray = *da.get();
+ GrProgramDesc desc;
+ const GrPipeline* pipeline = this->pipeline();
+ const GrPrimitiveProcessor* primProc = drawArray.fPrimitiveProcessor.get();
+ state->gpu()->buildProgramDesc(&desc, *primProc, *pipeline, fBatchTracker);
+ GrGpu::DrawArgs args(primProc, pipeline, &desc, &fBatchTracker);
+
+ int drawCount = drawArray.fDraws.count();
+ for (int i = 0; i < drawCount; i++) {
+ state->gpu()->draw(args, drawArray.fDraws[i]);
+ }
+ }
+}
#define GrVertexBatch_DEFINED
#include "GrDrawBatch.h"
-#include "GrBatchTarget.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrPendingProgramElement.h"
+#include "GrVertices.h"
+
+#include "SkTLList.h"
+
+class GrBatchFlushState;
/**
* Base class for vertex-based GrBatches.
*/
class GrVertexBatch : public GrDrawBatch {
public:
- GrVertexBatch();
+ class Target;
- virtual void generateGeometry(GrBatchTarget*) = 0;
+ GrVertexBatch();
- // TODO this goes away when batches are everywhere
- void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
- int numberOfDraws() const { return fNumberOfDraws; }
+ void prepareDraws(GrBatchFlushState* state);
+ void issueDraws(GrBatchFlushState* state);
protected:
/** Helper for rendering instances using an instanced index buffer. This class creates the
InstancedHelper() {}
/** Returns the allocated storage for the vertices. The caller should populate the
vertices before calling recordDraw(). */
- void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
+ void* init(Target*, GrPrimitiveType, size_t vertexStride,
const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
int instancesToDraw);
/** Call after init() to record the draw with the target. */
- void issueDraw(GrBatchTarget* batchTarget) {
- SkASSERT(fVertices.instanceCount());
- batchTarget->draw(fVertices);
- }
+ void recordDraw(Target* target);
private:
GrVertices fVertices;
};
/** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
and on success a pointer to the vertex data that the caller should populate before
calling recordDraw(). */
- void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);
-
- using InstancedHelper::issueDraw;
+ void* init(Target* batchTarget, size_t vertexStride, int quadsToDraw);
+ using InstancedHelper::recordDraw;
private:
typedef InstancedHelper INHERITED;
};
private:
- int fNumberOfDraws;
+ virtual void onPrepareDraws(Target*) = 0;
+
+ // A set of contiguous draws with no inline uploads between them. All the draws in a
+ // DrawArray share a single primitive processor and use the batch's GrPipeline.
+ struct DrawArray {
+ SkSTArray<1, GrVertices, true> fDraws;
+ GrPendingProgramElement<const GrPrimitiveProcessor> fPrimitiveProcessor;
+ };
+
+ // Array of DrawArray. There may be inline uploads between each DrawArray and each DrawArray
+ // may use a different primitive processor.
+ SkTLList<DrawArray> fDrawArrays;
+
+ // NOTE(review): purpose unclear -- only consumed by issueDraws() when building the
+ // program descriptor and DrawArgs. TODO: document its role or remove it.
+ GrBatchTracker fBatchTracker;
+
typedef GrDrawBatch INHERITED;
};
#include "GrDashingEffect.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrGeometryProcessor.h"
bool fHasEndRect;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
int instanceCount = fGeoData.count();
SkPaint::Cap cap = this->cap();
bool isRoundCap = SkPaint::kRound_Cap == cap;
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
// useAA here means Edge AA or MSAA
bool useAA = this->aaMode() != kBW_DashAAMode;
}
QuadHelper helper;
- void* vertices = helper.init(batchTarget, gp->getVertexStride(), totalRectCount);
+ void* vertices = helper.init(target, gp->getVertexStride(), totalRectCount);
if (!vertices) {
return;
}
rectIndex++;
}
SkASSERT(0 == (curVIdx % 4) && (curVIdx / 4) == totalRectCount);
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
const char* name() const override { return "Test LCD Text Batch"; }
void initBatchTracker(const GrPipelineOptimizations&) override {}
bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
- void generateGeometry(GrBatchTarget*) override {}
+ // Empty prepare hook: this test batch records no draws. (Dropped the stray ';'
+ // after the function body -- it was a redundant empty declaration.)
+ void onPrepareDraws(Target*) override {}
} testLCDCoverageBatch;