if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
InstancedRendering* ir = this->getOpList()->instancedRendering();
- op.reset(ir->recordRect(croppedRect, viewMatrix, paint.getColor(), aa,
- fInstancedPipelineInfo, &aaType));
+ op = ir->recordRect(croppedRect, viewMatrix, paint.getColor(), aa, fInstancedPipelineInfo,
+ &aaType);
if (op) {
GrPipelineBuilder pipelineBuilder(paint, aaType);
if (ss) {
namespace gr_instanced {
-class GLInstancedRendering::GLBatch final : public InstancedRendering::Batch {
+class GLInstancedRendering::GLOp final : public InstancedRendering::Op {
public:
DEFINE_OP_CLASS_ID
- GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
+ GLOp(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
int numGLCommands() const { return 1 + fNumChangesInGeometry; }
private:
friend class GLInstancedRendering;
- typedef Batch INHERITED;
+ typedef Op INHERITED;
};
GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
return static_cast<GrGLGpu*>(this->gpu());
}
-InstancedRendering::Batch* GLInstancedRendering::createBatch() {
- return new GLBatch(this);
-}
+sk_sp<InstancedRendering::Op> GLInstancedRendering::makeOp() { return sk_sp<Op>(new GLOp(this)); }
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
// Count what there is to draw.
- BatchList::Iter iter;
- iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
+ OpList::Iter iter;
+ iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
int numGLInstances = 0;
int numGLDrawCmds = 0;
- while (Batch* b = iter.get()) {
- GLBatch* batch = static_cast<GLBatch*>(b);
+ while (Op* o = iter.get()) {
+ GLOp* op = static_cast<GLOp*>(o);
iter.next();
- numGLInstances += batch->fNumDraws;
- numGLDrawCmds += batch->numGLCommands();
+ numGLInstances += op->fNumDraws;
+ numGLDrawCmds += op->numGLCommands();
}
if (!numGLDrawCmds) {
return;
SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);
SkASSERT(!fGLDrawCmdsInfo);
- if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
+ if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
fGLDrawCmdsInfo.reset(numGLDrawCmds);
}
- // Generate the instance and draw-indirect buffer contents based on the tracked batches.
- iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
- while (Batch* b = iter.get()) {
- GLBatch* batch = static_cast<GLBatch*>(b);
+ // Generate the instance and draw-indirect buffer contents based on the tracked ops.
+ iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
+ while (Op* o = iter.get()) {
+ GLOp* op = static_cast<GLOp*>(o);
iter.next();
- batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
- batch->fGLDrawCmdsIdx = glDrawCmdsIdx;
+ op->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
+ op->fGLDrawCmdsIdx = glDrawCmdsIdx;
- const Batch::Draw* draw = batch->fHeadDraw;
+ const Op::Draw* draw = op->fHeadDraw;
SkASSERT(draw);
do {
int instanceCount = 0;
glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
}
- if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
+ if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
cmdInfo.fGeometry = geometry;
cmdInfo.fInstanceCount = instanceCount;
}
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
- const Batch* baseBatch) {
+ const Op* baseOp) {
if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
return; // beginFlush was not successful.
}
}
const GrGLCaps& glCaps = this->glGpu()->glCaps();
- const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
- int numCommands = batch->numGLCommands();
+ const GLOp* op = static_cast<const GLOp*>(baseOp);
+ int numCommands = op->numGLCommands();
-#if GR_GL_LOG_INSTANCED_BATCHES
+#if GR_GL_LOG_INSTANCED_OPS
SkASSERT(fGLDrawCmdsInfo);
- SkDebugf("Instanced batch: [");
+ SkDebugf("Instanced op: [");
for (int i = 0; i < numCommands; ++i) {
- int glCmdIdx = batch->fGLDrawCmdsIdx + i;
+ int glCmdIdx = op->fGLDrawCmdsIdx + i;
SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
}
if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
SkASSERT(fDrawIndirectBuffer);
- int glCmdsIdx = batch->fGLDrawCmdsIdx;
- this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
+ int glCmdsIdx = op->fGLDrawCmdsIdx;
+ this->flushInstanceAttribs(op->fEmulatedBaseInstance);
GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
(GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
numCommands, 0));
return;
}
- int emulatedBaseInstance = batch->fEmulatedBaseInstance;
+ int emulatedBaseInstance = op->fEmulatedBaseInstance;
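+    // Fall back to one GL call per draw command. fEmulatedBaseInstance stands in for the base
+    // instance on drivers without base-instance support (it was computed in onBeginFlush above,
+    // where it is 0 whenever baseInstanceSupport is true).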
for (int i = 0; i < numCommands; ++i) {
- int glCmdIdx = batch->fGLDrawCmdsIdx + i;
+ int glCmdIdx = op->fGLDrawCmdsIdx + i;
this->flushInstanceAttribs(emulatedBaseInstance);
if (fDrawIndirectBuffer) {
GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
class GrGLCaps;
class GrGLGpu;
-#define GR_GL_LOG_INSTANCED_BATCHES 0
+#define GR_GL_LOG_INSTANCED_OPS 0
namespace gr_instanced {
GrGLGpu* glGpu() const;
- Batch* createBatch() override;
+ sk_sp<Op> makeOp() override;
void onBeginFlush(GrResourceProvider*) override;
- void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override;
+ void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) override;
void onEndFlush() override;
void onResetGpuResources(ResetType) override;
GrGpuResource::UniqueID fInstanceAttribsBufferUniqueId;
int fInstanceAttribsBaseInstance;
- class GLBatch;
+ class GLOp;
friend class ::GrGLCaps; // For CheckSupport.
return GrCaps::InstancedSupport::kMixedSampled;
}
-InstanceProcessor::InstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer)
- : fBatchInfo(batchInfo) {
+InstanceProcessor::InstanceProcessor(OpInfo opInfo, GrBuffer* paramsBuffer) : fOpInfo(opInfo) {
this->initClassID<InstanceProcessor>();
this->addVertexAttrib("shapeCoords", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision);
GR_STATIC_ASSERT(6 == (int)Attrib::kLocalRect);
GR_STATIC_ASSERT(7 == kNumAttribs);
- if (fBatchInfo.fHasParams) {
+ if (fOpInfo.fHasParams) {
SkASSERT(paramsBuffer);
fParamsAccess.reset(kRGBA_float_GrPixelConfig, paramsBuffer, kVertex_GrShaderFlag);
this->addBufferAccess(&fParamsAccess);
}
- if (fBatchInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
- if (!fBatchInfo.isSimpleRects() ||
- AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode) {
+ if (fOpInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
+ if (!fOpInfo.isSimpleRects() || AntialiasMode::kMixedSamples == fOpInfo.fAntialiasMode) {
this->setWillUseSampleLocations();
}
}
class GLSLInstanceProcessor::Backend {
public:
- static Backend* SK_WARN_UNUSED_RESULT Create(const GrPipeline&, BatchInfo, const VertexInputs&);
+ static Backend* SK_WARN_UNUSED_RESULT Create(const GrPipeline&, OpInfo, const VertexInputs&);
virtual ~Backend() {}
void init(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*);
const char* outColor);
protected:
- Backend(BatchInfo batchInfo, const VertexInputs& inputs)
- : fBatchInfo(batchInfo),
- fInputs(inputs),
- fModifiesCoverage(false),
- fModifiesColor(false),
- fNeedsNeighborRadii(false),
- fColor(kVec4f_GrSLType),
- fTriangleIsArc(kInt_GrSLType),
- fArcCoords(kVec2f_GrSLType),
- fInnerShapeCoords(kVec2f_GrSLType),
- fInnerRRect(kVec4f_GrSLType),
- fModifiedShapeCoords(nullptr) {
- if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
+ Backend(OpInfo opInfo, const VertexInputs& inputs)
+ : fOpInfo(opInfo)
+ , fInputs(inputs)
+ , fModifiesCoverage(false)
+ , fModifiesColor(false)
+ , fNeedsNeighborRadii(false)
+ , fColor(kVec4f_GrSLType)
+ , fTriangleIsArc(kInt_GrSLType)
+ , fArcCoords(kVec2f_GrSLType)
+ , fInnerShapeCoords(kVec2f_GrSLType)
+ , fInnerRRect(kVec4f_GrSLType)
+ , fModifiedShapeCoords(nullptr) {
+ if (fOpInfo.fShapeTypes & kRRect_ShapesMask) {
fModifiedShapeCoords = "adjustedShapeCoords";
}
}
void setupNinePatchRadii(GrGLSLVertexBuilder*);
void setupComplexRadii(GrGLSLVertexBuilder*);
- const BatchInfo fBatchInfo;
- const VertexInputs& fInputs;
- bool fModifiesCoverage;
- bool fModifiesColor;
- bool fNeedsNeighborRadii;
- GrGLSLVertToFrag fColor;
- GrGLSLVertToFrag fTriangleIsArc;
- GrGLSLVertToFrag fArcCoords;
- GrGLSLVertToFrag fInnerShapeCoords;
- GrGLSLVertToFrag fInnerRRect;
- const char* fModifiedShapeCoords;
+ const OpInfo fOpInfo;
+ const VertexInputs& fInputs;
+ bool fModifiesCoverage;
+ bool fModifiesColor;
+ bool fNeedsNeighborRadii;
+ GrGLSLVertToFrag fColor;
+ GrGLSLVertToFrag fTriangleIsArc;
+ GrGLSLVertToFrag fArcCoords;
+ GrGLSLVertToFrag fInnerShapeCoords;
+ GrGLSLVertToFrag fInnerRRect;
+ const char* fModifiedShapeCoords;
};
void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
varyingHandler->emitAttributes(ip);
VertexInputs inputs(ip, v);
- if (ip.batchInfo().fHasParams) {
+ if (ip.opInfo().fHasParams) {
SkASSERT(1 == ip.numBuffers());
inputs.initParams(args.fBufferSamplers[0]);
}
- if (!ip.batchInfo().fHasPerspective) {
+ if (!ip.opInfo().fHasPerspective) {
v->codeAppendf("mat2x3 shapeMatrix = mat2x3(%s, %s);",
inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY));
} else {
v->codeAppend ("}");
}
- bool hasSingleShapeType = SkIsPow2(ip.batchInfo().fShapeTypes);
+ bool hasSingleShapeType = SkIsPow2(ip.opInfo().fShapeTypes);
if (!hasSingleShapeType) {
v->defineConstant("SHAPE_TYPE_BIT", kShapeType_InfoBit);
v->codeAppendf("uint shapeType = %s >> SHAPE_TYPE_BIT;",
inputs.attr(Attrib::kInstanceInfo));
}
- std::unique_ptr<Backend> backend(Backend::Create(pipeline, ip.batchInfo(), inputs));
+ std::unique_ptr<Backend> backend(Backend::Create(pipeline, ip.opInfo(), inputs));
backend->init(varyingHandler, v);
int usedShapeDefinitions = 0;
- if (hasSingleShapeType || !(ip.batchInfo().fShapeTypes & ~kRRect_ShapesMask)) {
- if (kRect_ShapeFlag == ip.batchInfo().fShapeTypes) {
+ if (hasSingleShapeType || !(ip.opInfo().fShapeTypes & ~kRRect_ShapesMask)) {
+ if (kRect_ShapeFlag == ip.opInfo().fShapeTypes) {
backend->setupRect(v);
- } else if (kOval_ShapeFlag == ip.batchInfo().fShapeTypes) {
+ } else if (kOval_ShapeFlag == ip.opInfo().fShapeTypes) {
backend->setupOval(v);
} else {
backend->setupRRect(v, &usedShapeDefinitions);
}
} else {
- if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) {
+ if (ip.opInfo().fShapeTypes & kRRect_ShapesMask) {
v->codeAppend ("if (shapeType >= SIMPLE_R_RECT_SHAPE_TYPE) {");
backend->setupRRect(v, &usedShapeDefinitions);
v->codeAppend ("}");
usedShapeDefinitions |= kSimpleRRect_ShapeFlag;
}
- if (ip.batchInfo().fShapeTypes & kOval_ShapeFlag) {
- if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) {
- if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) {
+ if (ip.opInfo().fShapeTypes & kOval_ShapeFlag) {
+ if (ip.opInfo().fShapeTypes & kRect_ShapeFlag) {
+ if (ip.opInfo().fShapeTypes & kRRect_ShapesMask) {
v->codeAppend ("else ");
}
v->codeAppend ("if (OVAL_SHAPE_TYPE == shapeType) {");
backend->setupOval(v);
v->codeAppend ("}");
}
- if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) {
+ if (ip.opInfo().fShapeTypes & kRect_ShapeFlag) {
v->codeAppend ("else {");
backend->setupRect(v);
v->codeAppend ("}");
}
}
- if (ip.batchInfo().fInnerShapeTypes) {
- bool hasSingleInnerShapeType = SkIsPow2(ip.batchInfo().fInnerShapeTypes);
+ if (ip.opInfo().fInnerShapeTypes) {
+ bool hasSingleInnerShapeType = SkIsPow2(ip.opInfo().fInnerShapeTypes);
if (!hasSingleInnerShapeType) {
v->defineConstantf("int", "INNER_SHAPE_TYPE_MASK", "0x%x", kInnerShapeType_InfoMask);
v->defineConstant("INNER_SHAPE_TYPE_BIT", kInnerShapeType_InfoBit);
backend->initInnerShape(varyingHandler, v);
- SkASSERT(0 == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask) ||
- kSimpleRRect_ShapeFlag == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask));
+ SkASSERT(0 == (ip.opInfo().fInnerShapeTypes & kRRect_ShapesMask) ||
+ kSimpleRRect_ShapeFlag == (ip.opInfo().fInnerShapeTypes & kRRect_ShapesMask));
if (hasSingleInnerShapeType) {
- if (kRect_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
+ if (kRect_ShapeFlag == ip.opInfo().fInnerShapeTypes) {
backend->setupInnerRect(v);
- } else if (kOval_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
+ } else if (kOval_ShapeFlag == ip.opInfo().fInnerShapeTypes) {
backend->setupInnerOval(v);
} else {
backend->setupInnerSimpleRRect(v);
}
} else {
- if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
+ if (ip.opInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
v->codeAppend ("if (SIMPLE_R_RECT_SHAPE_TYPE == innerShapeType) {");
backend->setupInnerSimpleRRect(v);
v->codeAppend("}");
usedShapeDefinitions |= kSimpleRRect_ShapeFlag;
}
- if (ip.batchInfo().fInnerShapeTypes & kOval_ShapeFlag) {
- if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) {
- if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
+ if (ip.opInfo().fInnerShapeTypes & kOval_ShapeFlag) {
+ if (ip.opInfo().fInnerShapeTypes & kRect_ShapeFlag) {
+ if (ip.opInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
v->codeAppend ("else ");
}
v->codeAppend ("if (OVAL_SHAPE_TYPE == innerShapeType) {");
backend->setupInnerOval(v);
v->codeAppend("}");
}
- if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) {
+ if (ip.opInfo().fInnerShapeTypes & kRect_ShapeFlag) {
v->codeAppend("else {");
backend->setupInnerRect(v);
v->codeAppend("}");
args.fOutputColor);
const char* localCoords = nullptr;
- if (ip.batchInfo().fUsesLocalCoords) {
+ if (ip.opInfo().fUsesLocalCoords) {
localCoords = "localCoords";
v->codeAppendf("vec2 t = 0.5 * (%s + vec2(1));", backend->outShapeCoords());
v->codeAppendf("vec2 localCoords = (1.0 - t) * %s.xy + t * %s.zw;",
inputs.attr(Attrib::kLocalRect), inputs.attr(Attrib::kLocalRect));
}
- if (ip.batchInfo().fHasLocalMatrix && ip.batchInfo().fHasParams) {
+ if (ip.opInfo().fHasLocalMatrix && ip.opInfo().fHasParams) {
v->defineConstantf("int", "LOCAL_MATRIX_FLAG", "0x%x", kLocalMatrix_InfoFlag);
v->codeAppendf("if (0 != (%s & LOCAL_MATRIX_FLAG)) {",
inputs.attr(Attrib::kInstanceInfo));
- if (!ip.batchInfo().fUsesLocalCoords) {
+ if (!ip.opInfo().fUsesLocalCoords) {
inputs.skipParams(2);
} else {
v->codeAppendf( "mat2x3 localMatrix;");
v->codeAppend("}");
}
- GrSLType positionType = ip.batchInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType;
+ GrSLType positionType = ip.opInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType;
v->codeAppendf("%s deviceCoords = vec3(%s, 1) * shapeMatrix;",
GrGLSLTypeString(positionType), backend->outShapeCoords());
gpArgs->fPositionVar.set(positionType, "deviceCoords");
v->codeAppend ("mat2 p = ");
fInputs.fetchNextParam(kMat22f_GrSLType);
v->codeAppend (";");
- uint8_t types = fBatchInfo.fShapeTypes & kRRect_ShapesMask;
+ uint8_t types = fOpInfo.fShapeTypes & kRRect_ShapesMask;
if (0 == (types & (types - 1))) {
if (kSimpleRRect_ShapeFlag == types) {
this->setupSimpleRadii(v);
void GLSLInstanceProcessor::Backend::initInnerShape(GrGLSLVaryingHandler* varyingHandler,
GrGLSLVertexBuilder* v) {
- SkASSERT(!(fBatchInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag)));
+ SkASSERT(!(fOpInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag)));
this->onInitInnerShape(varyingHandler, v);
class GLSLInstanceProcessor::BackendNonAA : public Backend {
public:
- BackendNonAA(BatchInfo batchInfo, const VertexInputs& inputs)
- : INHERITED(batchInfo, inputs) {
- if (fBatchInfo.fCannotDiscard && !fBatchInfo.isSimpleRects()) {
- fModifiesColor = !fBatchInfo.fCannotTweakAlphaForCoverage;
+ BackendNonAA(OpInfo opInfo, const VertexInputs& inputs) : INHERITED(opInfo, inputs) {
+ if (fOpInfo.fCannotDiscard && !fOpInfo.isSimpleRects()) {
+ fModifiesColor = !fOpInfo.fCannotTweakAlphaForCoverage;
fModifiesCoverage = !fModifiesColor;
}
}
void GLSLInstanceProcessor::BackendNonAA::onInit(GrGLSLVaryingHandler* varyingHandler,
GrGLSLVertexBuilder*) {
- if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ if (kRect_ShapeFlag != fOpInfo.fShapeTypes) {
varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
varyingHandler->addVarying("arcCoords", &fArcCoords, kMedium_GrSLPrecision);
}
void GLSLInstanceProcessor::BackendNonAA::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
GrGLSLVertexBuilder*) {
varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision);
- if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes &&
- kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ if (kRect_ShapeFlag != fOpInfo.fInnerShapeTypes &&
+ kOval_ShapeFlag != fOpInfo.fInnerShapeTypes) {
varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision);
}
}
const char* outCoverage,
const char* outColor) {
const char* dropFragment = nullptr;
- if (!fBatchInfo.fCannotDiscard) {
+ if (!fOpInfo.fCannotDiscard) {
dropFragment = "discard";
} else if (fModifiesCoverage) {
f->codeAppend ("lowp float covered = 1.0;");
f->codeAppendf("if (%s != 0 && dot(%s, %s) > 1.0) %s;",
fTriangleIsArc.fsIn(), fArcCoords.fsIn(), fArcCoords.fsIn(), dropFragment);
}
- if (fBatchInfo.fInnerShapeTypes) {
+ if (fOpInfo.fInnerShapeTypes) {
SkASSERT(dropFragment);
f->codeAppendf("// Inner shape.\n");
- if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) {
f->codeAppendf("if (all(lessThanEqual(abs(%s), vec2(1)))) %s;",
fInnerShapeCoords.fsIn(), dropFragment);
- } else if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ } else if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) {
f->codeAppendf("if ((dot(%s, %s) <= 1.0)) %s;",
fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn(), dropFragment);
} else {
class GLSLInstanceProcessor::BackendCoverage : public Backend {
public:
- BackendCoverage(BatchInfo batchInfo, const VertexInputs& inputs)
- : INHERITED(batchInfo, inputs),
- fColorTimesRectCoverage(kVec4f_GrSLType),
- fRectCoverage(kFloat_GrSLType),
- fEllipseCoords(kVec2f_GrSLType),
- fEllipseName(kVec2f_GrSLType),
- fBloatedRadius(kFloat_GrSLType),
- fDistanceToInnerEdge(kVec2f_GrSLType),
- fInnerShapeBloatedHalfSize(kVec2f_GrSLType),
- fInnerEllipseCoords(kVec2f_GrSLType),
- fInnerEllipseName(kVec2f_GrSLType) {
- fShapeIsCircle = !fBatchInfo.fNonSquare && !(fBatchInfo.fShapeTypes & kRRect_ShapesMask);
- fTweakAlphaForCoverage = !fBatchInfo.fCannotTweakAlphaForCoverage &&
- !fBatchInfo.fInnerShapeTypes;
+ BackendCoverage(OpInfo opInfo, const VertexInputs& inputs)
+ : INHERITED(opInfo, inputs)
+ , fColorTimesRectCoverage(kVec4f_GrSLType)
+ , fRectCoverage(kFloat_GrSLType)
+ , fEllipseCoords(kVec2f_GrSLType)
+ , fEllipseName(kVec2f_GrSLType)
+ , fBloatedRadius(kFloat_GrSLType)
+ , fDistanceToInnerEdge(kVec2f_GrSLType)
+ , fInnerShapeBloatedHalfSize(kVec2f_GrSLType)
+ , fInnerEllipseCoords(kVec2f_GrSLType)
+ , fInnerEllipseName(kVec2f_GrSLType) {
+ fShapeIsCircle = !fOpInfo.fNonSquare && !(fOpInfo.fShapeTypes & kRRect_ShapesMask);
+ fTweakAlphaForCoverage = !fOpInfo.fCannotTweakAlphaForCoverage && !fOpInfo.fInnerShapeTypes;
fModifiesCoverage = !fTweakAlphaForCoverage;
fModifiesColor = fTweakAlphaForCoverage;
fModifiedShapeCoords = "bloatedShapeCoords";
v->codeAppend ("vec2 bloat = 0.5 / shapeHalfSize;");
v->codeAppendf("bloatedShapeCoords = %s * (1.0 + bloat);", fInputs.attr(Attrib::kShapeCoords));
- if (kOval_ShapeFlag != fBatchInfo.fShapeTypes) {
+ if (kOval_ShapeFlag != fOpInfo.fShapeTypes) {
if (fTweakAlphaForCoverage) {
varyingHandler->addVarying("colorTimesRectCoverage", &fColorTimesRectCoverage,
kLow_GrSLPrecision);
- if (kRect_ShapeFlag == fBatchInfo.fShapeTypes) {
+ if (kRect_ShapeFlag == fOpInfo.fShapeTypes) {
fColor = fColorTimesRectCoverage;
}
} else {
}
v->codeAppend("float rectCoverage = 0.0;");
}
- if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ if (kRect_ShapeFlag != fOpInfo.fShapeTypes) {
varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
if (!fShapeIsCircle) {
varyingHandler->addVarying("ellipseCoords", &fEllipseCoords, kMedium_GrSLPrecision);
GrGLSLVertexBuilder* v) {
v->codeAppend("vec2 innerShapeHalfSize = shapeHalfSize / outer2Inner.xy;");
- if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) {
varyingHandler->addVarying("innerEllipseCoords", &fInnerEllipseCoords,
kMedium_GrSLPrecision);
varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, kHigh_GrSLPrecision);
kMedium_GrSLPrecision);
varyingHandler->addFlatVarying("innerShapeBloatedHalfSize", &fInnerShapeBloatedHalfSize,
kMedium_GrSLPrecision);
- if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ if (kRect_ShapeFlag != fOpInfo.fInnerShapeTypes) {
varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords,
kMedium_GrSLPrecision);
varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName,
}
SkString coverage("lowp float coverage");
- if (fBatchInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) {
+ if (fOpInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) {
f->codeAppendf("%s;", coverage.c_str());
coverage = "coverage";
}
if (fShapeIsCircle) {
this->emitCircle(f, coverage.c_str());
} else {
- bool ellipseCoordsMayBeNegative = SkToBool(fBatchInfo.fShapeTypes & kOval_ShapeFlag);
+ bool ellipseCoordsMayBeNegative = SkToBool(fOpInfo.fShapeTypes & kOval_ShapeFlag);
this->emitArc(f, fEllipseCoords.fsIn(), fEllipseName.fsIn(),
true /*ellipseCoordsNeedClamp*/, ellipseCoordsMayBeNegative,
coverage.c_str());
this->emitRect(f, coverage.c_str(), outColor);
}
- if (fBatchInfo.fInnerShapeTypes) {
+ if (fOpInfo.fInnerShapeTypes) {
f->codeAppendf("// Inner shape.\n");
SkString innerCoverageDecl("lowp float innerCoverage");
- if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) {
this->emitArc(f, fInnerEllipseCoords.fsIn(), fInnerEllipseName.fsIn(),
true /*ellipseCoordsNeedClamp*/, true /*ellipseCoordsMayBeNegative*/,
innerCoverageDecl.c_str());
fDistanceToInnerEdge.vsOut());
v->codeAppendf("%s = innerShapeHalfSize + 0.5;", fInnerShapeBloatedHalfSize.vsOut());
- if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) {
this->emitInnerRect(f, innerCoverageDecl.c_str());
} else {
f->codeAppendf("%s = 0.0;", innerCoverageDecl.c_str());
void GLSLInstanceProcessor::BackendCoverage::emitCircle(GrGLSLPPFragmentBuilder* f,
const char* outCoverage) {
// TODO: circleCoords = max(circleCoords, 0) if we decide to do this optimization on rrects.
- SkASSERT(!(kRRect_ShapesMask & fBatchInfo.fShapeTypes));
+ SkASSERT(!(kRRect_ShapesMask & fOpInfo.fShapeTypes));
f->codeAppendf("mediump float distanceToEdge = %s - length(%s);",
fBloatedRadius.fsIn(), fEllipseCoords.fsIn());
f->codeAppendf("%s = clamp(distanceToEdge, 0.0, 1.0);", outCoverage);
class GLSLInstanceProcessor::BackendMultisample : public Backend {
public:
- BackendMultisample(BatchInfo batchInfo, const VertexInputs& inputs, int effectiveSampleCnt)
- : INHERITED(batchInfo, inputs),
- fEffectiveSampleCnt(effectiveSampleCnt),
- fShapeCoords(kVec2f_GrSLType),
- fShapeInverseMatrix(kMat22f_GrSLType),
- fFragShapeHalfSpan(kVec2f_GrSLType),
- fArcTest(kVec2f_GrSLType),
- fArcInverseMatrix(kMat22f_GrSLType),
- fFragArcHalfSpan(kVec2f_GrSLType),
- fEarlyAccept(kInt_GrSLType),
- fInnerShapeInverseMatrix(kMat22f_GrSLType),
- fFragInnerShapeHalfSpan(kVec2f_GrSLType) {
- fRectTrianglesMaySplit = fBatchInfo.fHasPerspective;
- fNeedsNeighborRadii = this->isMixedSampled() && !fBatchInfo.fHasPerspective;
+ BackendMultisample(OpInfo opInfo, const VertexInputs& inputs, int effectiveSampleCnt)
+ : INHERITED(opInfo, inputs)
+ , fEffectiveSampleCnt(effectiveSampleCnt)
+ , fShapeCoords(kVec2f_GrSLType)
+ , fShapeInverseMatrix(kMat22f_GrSLType)
+ , fFragShapeHalfSpan(kVec2f_GrSLType)
+ , fArcTest(kVec2f_GrSLType)
+ , fArcInverseMatrix(kMat22f_GrSLType)
+ , fFragArcHalfSpan(kVec2f_GrSLType)
+ , fEarlyAccept(kInt_GrSLType)
+ , fInnerShapeInverseMatrix(kMat22f_GrSLType)
+ , fFragInnerShapeHalfSpan(kVec2f_GrSLType) {
+ fRectTrianglesMaySplit = fOpInfo.fHasPerspective;
+ fNeedsNeighborRadii = this->isMixedSampled() && !fOpInfo.fHasPerspective;
}
private:
- bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode; }
+ bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fOpInfo.fAntialiasMode; }
void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
void setupRect(GrGLSLVertexBuilder*) override;
void GLSLInstanceProcessor::BackendMultisample::onInit(GrGLSLVaryingHandler* varyingHandler,
GrGLSLVertexBuilder* v) {
if (!this->isMixedSampled()) {
- if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ if (kRect_ShapeFlag != fOpInfo.fShapeTypes) {
varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
kHigh_GrSLPrecision);
varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
kHigh_GrSLPrecision);
}
- } else if (!fBatchInfo.fInnerShapeTypes) {
+ } else if (!fOpInfo.fInnerShapeTypes) {
return;
}
} else {
varyingHandler->addVarying("shapeCoords", &fShapeCoords, kHigh_GrSLPrecision);
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
varyingHandler->addFlatVarying("shapeInverseMatrix", &fShapeInverseMatrix,
kHigh_GrSLPrecision);
varyingHandler->addFlatVarying("fragShapeHalfSpan", &fFragShapeHalfSpan,
kHigh_GrSLPrecision);
}
- if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
+ if (fOpInfo.fShapeTypes & kRRect_ShapesMask) {
varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
varyingHandler->addVarying("arcTest", &fArcTest, kHigh_GrSLPrecision);
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
kHigh_GrSLPrecision);
varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
kHigh_GrSLPrecision);
}
- } else if (fBatchInfo.fShapeTypes & kOval_ShapeFlag) {
+ } else if (fOpInfo.fShapeTypes & kOval_ShapeFlag) {
fArcCoords = fShapeCoords;
fArcInverseMatrix = fShapeInverseMatrix;
fFragArcHalfSpan = fFragShapeHalfSpan;
- if (fBatchInfo.fShapeTypes & kRect_ShapeFlag) {
+ if (fOpInfo.fShapeTypes & kRect_ShapeFlag) {
varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc,
kLow_GrSLPrecision);
}
}
- if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
- v->defineConstantf("int", "SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1);
- varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision);
+ if (kRect_ShapeFlag != fOpInfo.fShapeTypes) {
+ v->defineConstantf("int", "SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1);
+ varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision);
}
}
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
v->codeAppend("mat2 shapeInverseMatrix = inverse(mat2(shapeMatrix));");
v->codeAppend("vec2 fragShapeSpan = abs(vec4(shapeInverseMatrix).xz) + "
"abs(vec4(shapeInverseMatrix).yw);");
return;
}
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
// For the mixed samples algorithm it's best to bloat the corner triangles a bit so that
// more of the pixels that cross into the arc region are completely inside the shared edges.
// We also snap to a regular rect if the radii shrink smaller than a pixel.
v->codeAppendf("%s = (cornerSize == vec2(0)) ? vec2(0) : "
"cornerSign * %s * mat2(1, cornerSize.x - 1.0, cornerSize.y - 1.0, 1);",
fArcTest.vsOut(), fModifiedShapeCoords);
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
// Shift the point at which distances to edges are measured from the center of the pixel
// to the corner. This way the sign of fArcTest will quickly tell us whether a pixel
// is completely inside the shared edge. Perspective mode will accomplish this same task
GLSLInstanceProcessor::BackendMultisample::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
GrGLSLVertexBuilder* v) {
varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision);
- if (kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes &&
- kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ if (kOval_ShapeFlag != fOpInfo.fInnerShapeTypes &&
+ kRect_ShapeFlag != fOpInfo.fInnerShapeTypes) {
varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision);
}
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
varyingHandler->addFlatVarying("innerShapeInverseMatrix", &fInnerShapeInverseMatrix,
kHigh_GrSLPrecision);
v->codeAppendf("%s = shapeInverseMatrix * mat2(outer2Inner.x, 0, 0, outer2Inner.y);",
f->defineConstantf("int", "SAMPLE_MASK_MSB", "0x%x", 1 << (fEffectiveSampleCnt - 1));
}
- if (kRect_ShapeFlag != (fBatchInfo.fShapeTypes | fBatchInfo.fInnerShapeTypes)) {
+ if (kRect_ShapeFlag != (fOpInfo.fShapeTypes | fOpInfo.fInnerShapeTypes)) {
GrShaderVar x("x", kVec2f_GrSLType, GrShaderVar::kNonArray, kHigh_GrSLPrecision);
f->emitFunction(kFloat_GrSLType, "square", 1, &x, "return dot(x, x);", &fSquareFun);
}
arcCoords.fVarying = &fArcCoords;
arcCoords.fInverseMatrix = fArcInverseMatrix.fsIn();
arcCoords.fFragHalfSpan = fFragArcHalfSpan.fsIn();
- bool clampArcCoords = this->isMixedSampled() && (fBatchInfo.fShapeTypes & kRRect_ShapesMask);
+ bool clampArcCoords = this->isMixedSampled() && (fOpInfo.fShapeTypes & kRRect_ShapesMask);
EmitShapeOpts opts;
opts.fIsTightGeometry = true;
opts.fResolveMixedSamples = this->isMixedSampled();
opts.fInvertCoverage = false;
- if (fBatchInfo.fHasPerspective && fBatchInfo.fInnerShapeTypes) {
+ if (fOpInfo.fHasPerspective && fOpInfo.fInnerShapeTypes) {
// This determines if the fragment should consider the inner shape in its sample mask.
// We take the derivative early in case discards may occur before we get to the inner shape.
f->codeAppendf("highp vec2 fragInnerShapeApproxHalfSpan = 0.5 * fwidth(%s);",
}
} else {
const char* arcTest = fArcTest.fsIn();
- if (arcTest && fBatchInfo.fHasPerspective) {
+ if (arcTest && fOpInfo.fHasPerspective) {
// The non-perspective version accounts for fwidth() in the vertex shader.
// We make sure to take the derivative here, before a neighbor pixel may early accept.
f->codeAppendf("highp vec2 arcTest = %s - 0.5 * fwidth(%s);",
f->codeAppend ("} else {");
this->emitArc(f, arcCoords, false, clampArcCoords, opts);
f->codeAppend ("}");
- } else if (fBatchInfo.fShapeTypes == kOval_ShapeFlag) {
+ } else if (fOpInfo.fShapeTypes == kOval_ShapeFlag) {
this->emitArc(f, arcCoords, false, clampArcCoords, opts);
} else {
- SkASSERT(fBatchInfo.fShapeTypes == kRect_ShapeFlag);
+ SkASSERT(fOpInfo.fShapeTypes == kRect_ShapeFlag);
this->emitRect(f, shapeCoords, opts);
}
f->codeAppend ("}");
}
- if (fBatchInfo.fInnerShapeTypes) {
+ if (fOpInfo.fInnerShapeTypes) {
f->codeAppendf("// Inner shape.\n");
EmitShapeCoords innerShapeCoords;
innerShapeCoords.fVarying = &fInnerShapeCoords;
- if (!fBatchInfo.fHasPerspective) {
+ if (!fOpInfo.fHasPerspective) {
innerShapeCoords.fInverseMatrix = fInnerShapeInverseMatrix.fsIn();
innerShapeCoords.fFragHalfSpan = fFragInnerShapeHalfSpan.fsIn();
}
innerOpts.fResolveMixedSamples = false; // Mixed samples are resolved in the outer shape.
innerOpts.fInvertCoverage = true;
- if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) {
this->emitArc(f, innerShapeCoords, true, false, innerOpts);
} else {
f->codeAppendf("if (all(lessThan(abs(%s), 1.0 + %s))) {", fInnerShapeCoords.fsIn(),
- !fBatchInfo.fHasPerspective ? innerShapeCoords.fFragHalfSpan
- : "fragInnerShapeApproxHalfSpan"); // Above.
- if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ !fOpInfo.fHasPerspective ? innerShapeCoords.fFragHalfSpan
+ : "fragInnerShapeApproxHalfSpan"); // Above.
+ if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) {
this->emitRect(f, innerShapeCoords, innerOpts);
} else {
this->emitSimpleRRect(f, innerShapeCoords, fInnerRRect.fsIn(), innerOpts);
// fragment.
f->codeAppend("if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) {");
// Drop this fragment.
- if (!fBatchInfo.fCannotDiscard) {
+ if (!fOpInfo.fCannotDiscard) {
f->codeAppend("discard;");
} else {
f->overrideSampleCoverage("0");
f->codeAppend("}");
}
} else { // Reject the entire fragment.
- if (!fBatchInfo.fCannotDiscard) {
+ if (!fOpInfo.fCannotDiscard) {
f->codeAppend("discard;");
} else if (opts.fResolveMixedSamples) {
f->overrideSampleCoverage("0");
SkASSERT(!opts.fInvertCoverage);
f->codeAppendf("if ((gl_SampleMaskIn[0] & (1 << findMSB(%s))) == 0) {", shapeMask);
// Drop this fragment.
- if (!fBatchInfo.fCannotDiscard) {
+ if (!fOpInfo.fCannotDiscard) {
f->codeAppend ("discard;");
} else {
f->overrideSampleCoverage("0");
////////////////////////////////////////////////////////////////////////////////////////////////////
-GLSLInstanceProcessor::Backend*
-GLSLInstanceProcessor::Backend::Create(const GrPipeline& pipeline, BatchInfo batchInfo,
- const VertexInputs& inputs) {
- switch (batchInfo.fAntialiasMode) {
+GLSLInstanceProcessor::Backend* GLSLInstanceProcessor::Backend::Create(const GrPipeline& pipeline,
+ OpInfo opInfo,
+ const VertexInputs& inputs) {
+ switch (opInfo.fAntialiasMode) {
default:
SkFAIL("Unexpected antialias mode.");
case AntialiasMode::kNone:
- return new BackendNonAA(batchInfo, inputs);
+ return new BackendNonAA(opInfo, inputs);
case AntialiasMode::kCoverage:
- return new BackendCoverage(batchInfo, inputs);
+ return new BackendCoverage(opInfo, inputs);
case AntialiasMode::kMSAA:
case AntialiasMode::kMixedSamples: {
const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTargetPriv();
const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipeline);
- return new BackendMultisample(batchInfo, inputs, specs.fEffectiveSampleCnt);
+ return new BackendMultisample(opInfo, inputs, specs.fEffectiveSampleCnt);
}
}
}
*/
class InstanceProcessor : public GrGeometryProcessor {
public:
- InstanceProcessor(BatchInfo, GrBuffer* paramsBuffer);
+ InstanceProcessor(OpInfo, GrBuffer* paramsBuffer);
const char* name() const override { return "Instance Processor"; }
- BatchInfo batchInfo() const { return fBatchInfo; }
+ OpInfo opInfo() const { return fOpInfo; }
void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
- b->add32(fBatchInfo.fData);
+ b->add32(fOpInfo.fData);
}
GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
*/
static GrCaps::InstancedSupport CheckSupport(const GrShaderCaps&, const GrCaps&);
- const BatchInfo fBatchInfo;
- BufferAccess fParamsAccess;
+ OpInfo fOpInfo;
+ BufferAccess fParamsAccess;
friend class GLInstancedRendering; // For CheckSupport.
fDrawPool(1024, 1024) {
}
-GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
- GrColor color, GrAA aa,
- const GrInstancedPipelineInfo& info, GrAAType* aaType) {
+sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa, info, aaType);
}
-GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
- GrColor color, const SkRect& localRect, GrAA aa,
- const GrInstancedPipelineInfo& info, GrAAType* aaType) {
+sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, const SkRect& localRect, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, aa, info,
aaType);
}
-GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
- GrColor color, const SkMatrix& localMatrix,
- GrAA aa, const GrInstancedPipelineInfo& info,
- GrAAType* aaType) {
+sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, const SkMatrix& localMatrix, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
if (localMatrix.hasPerspective()) {
return nullptr; // Perspective is not yet supported in the local matrix.
}
- if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa,
- info, aaType)) {
- batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
- batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
- localMatrix.getTranslateX());
- batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
- localMatrix.getTranslateY());
- batch->fInfo.fHasLocalMatrix = true;
- return batch;
+ if (sk_sp<Op> op = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa, info,
+ aaType)) {
+ op->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
+ op->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
+ localMatrix.getTranslateX());
+ op->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
+ localMatrix.getTranslateY());
+ op->fInfo.fHasLocalMatrix = true;
+ return std::move(op);
}
return nullptr;
}
-GrDrawOp* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
- GrColor color, GrAA aa,
- const GrInstancedPipelineInfo& info, GrAAType* aaType) {
+sk_sp<GrDrawOp> InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
+ GrColor color, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, aa, info, aaType);
}
-GrDrawOp* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
- GrColor color, GrAA aa,
- const GrInstancedPipelineInfo& info, GrAAType* aaType) {
- if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
+sk_sp<GrDrawOp> InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
+ GrColor color, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
+ if (sk_sp<Op> op = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
rrect.rect(), aa, info, aaType)) {
- batch->appendRRectParams(rrect);
- return batch;
+ op->appendRRectParams(rrect);
+ return std::move(op);
}
return nullptr;
}
-GrDrawOp* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
- const SkMatrix& viewMatrix, GrColor color,
- GrAA aa, const GrInstancedPipelineInfo& info,
- GrAAType* aaType) {
+sk_sp<GrDrawOp> InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkMatrix& viewMatrix, GrColor color, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
if (inner.getType() > SkRRect::kSimple_Type) {
return nullptr; // Complex inner round rects are not yet supported.
}
if (SkRRect::kEmpty_Type == inner.getType()) {
return this->recordRRect(outer, viewMatrix, color, aa, info, aaType);
}
- if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
+ if (sk_sp<Op> op = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
outer.rect(), aa, info, aaType)) {
- batch->appendRRectParams(outer);
+ op->appendRRectParams(outer);
ShapeType innerShapeType = GetRRectShapeType(inner);
- batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
- batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
- batch->appendParamsTexel(inner.rect().asScalars(), 4);
- batch->appendRRectParams(inner);
- return batch;
+ op->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
+ op->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
+ op->appendParamsTexel(inner.rect().asScalars(), 4);
+ op->appendRRectParams(inner);
+ return std::move(op);
}
return nullptr;
}
-InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds,
- const SkMatrix& viewMatrix,
- GrColor color, const SkRect& localRect,
- GrAA aa,
- const GrInstancedPipelineInfo& info,
- GrAAType* aaType) {
+sk_sp<InstancedRendering::Op> InstancedRendering::recordShape(
+ ShapeType type, const SkRect& bounds, const SkMatrix& viewMatrix, GrColor color,
+ const SkRect& localRect, GrAA aa, const GrInstancedPipelineInfo& info, GrAAType* aaType) {
SkASSERT(State::kRecordingDraws == fState);
if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) {
return nullptr;
}
- Batch* batch = this->createBatch();
- batch->fInfo.fAntialiasMode = antialiasMode;
- batch->fInfo.fShapeTypes = GetShapeFlag(type);
- batch->fInfo.fCannotDiscard = !info.fCanDiscard;
+ sk_sp<Op> op = this->makeOp();
+ op->fInfo.fAntialiasMode = antialiasMode;
+ op->fInfo.fShapeTypes = GetShapeFlag(type);
+ op->fInfo.fCannotDiscard = !info.fCanDiscard;
- Instance& instance = batch->getSingleInstance();
+ Instance& instance = op->getSingleInstance();
instance.fInfo = (int)type << kShapeType_InfoBit;
- Batch::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage)
- ? Batch::HasAABloat::kYes
- : Batch::HasAABloat::kNo;
- Batch::IsZeroArea zeroArea = (bounds.isEmpty()) ? Batch::IsZeroArea::kYes
- : Batch::IsZeroArea::kNo;
+ Op::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage) ? Op::HasAABloat::kYes
+ : Op::HasAABloat::kNo;
+ Op::IsZeroArea zeroArea = (bounds.isEmpty()) ? Op::IsZeroArea::kYes : Op::IsZeroArea::kNo;
// The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
// will map this rectangle to the same device coordinates as "viewMatrix * bounds".
// it's quite simple to find the bounding rectangle:
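// (Each device-space half-extent of the mapped [-1,+1] square is maximized at a corner of the
// square, so it is just the sum of the absolute values of that row of the matrix.)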
float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
- SkRect batchBounds;
- batchBounds.fLeft = m[2] - devBoundsHalfWidth;
- batchBounds.fRight = m[2] + devBoundsHalfWidth;
- batchBounds.fTop = m[5] - devBoundsHalfHeight;
- batchBounds.fBottom = m[5] + devBoundsHalfHeight;
- batch->setBounds(batchBounds, aaBloat, zeroArea);
+ SkRect opBounds;
+ opBounds.fLeft = m[2] - devBoundsHalfWidth;
+ opBounds.fRight = m[2] + devBoundsHalfWidth;
+ opBounds.fTop = m[5] - devBoundsHalfHeight;
+ opBounds.fBottom = m[5] + devBoundsHalfHeight;
+ op->setBounds(opBounds, aaBloat, zeroArea);
// TODO: Is this worth the CPU overhead?
- batch->fInfo.fNonSquare =
- fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
- fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
- fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
+ op->fInfo.fNonSquare =
+ fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
+ fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
+ fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) >
+ 1e-2f; // Diff. lengths?
} else {
SkMatrix shapeMatrix(viewMatrix);
shapeMatrix.preTranslate(tx, ty);
m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
// Send the perspective column as a param.
- batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
- shapeMatrix[SkMatrix::kMPersp2]);
- batch->fInfo.fHasPerspective = true;
+ op->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
+ shapeMatrix[SkMatrix::kMPersp2]);
+ op->fInfo.fHasPerspective = true;
- batch->setBounds(bounds, aaBloat, zeroArea);
- batch->fInfo.fNonSquare = true;
+ op->setBounds(bounds, aaBloat, zeroArea);
+ op->fInfo.fNonSquare = true;
}
instance.fColor = color;
const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
- batch->fPixelLoad = batch->bounds().height() * batch->bounds().width();
- return batch;
+ op->fPixelLoad = op->bounds().height() * op->bounds().width();
+ return op;
}
inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa,
return false;
}
-InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
- : INHERITED(classID),
- fInstancedRendering(ir),
- fIsTracked(false),
- fNumDraws(1),
- fNumChangesInGeometry(0) {
+InstancedRendering::Op::Op(uint32_t classID, InstancedRendering* ir)
+ : INHERITED(classID)
+ , fInstancedRendering(ir)
+ , fIsTracked(false)
+ , fNumDraws(1)
+ , fNumChangesInGeometry(0) {
fHeadDraw = fTailDraw = fInstancedRendering->fDrawPool.allocate();
#ifdef SK_DEBUG
fHeadDraw->fGeometry = {-1, 0};
fHeadDraw->fNext = nullptr;
}
-InstancedRendering::Batch::~Batch() {
+InstancedRendering::Op::~Op() {
if (fIsTracked) {
- fInstancedRendering->fTrackedBatches.remove(this);
+ fInstancedRendering->fTrackedOps.remove(this);
}
Draw* draw = fHeadDraw;
}
}
-void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
+void InstancedRendering::Op::appendRRectParams(const SkRRect& rrect) {
SkASSERT(!fIsTracked);
switch (rrect.getType()) {
case SkRRect::kSimple_Type: {
}
}
-void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) {
+void InstancedRendering::Op::appendParamsTexel(const SkScalar* vals, int count) {
SkASSERT(!fIsTracked);
SkASSERT(count <= 4 && count >= 0);
const float* valsAsFloats = vals; // Ensure SkScalar == float.
fInfo.fHasParams = true;
}
-void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
+void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
SkASSERT(!fIsTracked);
ParamsTexel& texel = fParams.push_back();
texel.fX = SkScalarToFloat(x);
fInfo.fHasParams = true;
}
-void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
+void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
SkASSERT(!fIsTracked);
ParamsTexel& texel = fParams.push_back();
texel.fX = SkScalarToFloat(x);
fInfo.fHasParams = true;
}
-void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
- GrInitInvariantOutput* coverage,
- GrBatchToXPOverrides* overrides) const {
+void InstancedRendering::Op::computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const {
color->setKnownFourComponents(this->getSingleInstance().fColor);
if (AntialiasMode::kCoverage == fInfo.fAntialiasMode ||
}
}
-void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+void InstancedRendering::Op::initBatchTracker(const GrXPOverridesForBatch& overrides) {
Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
SkASSERT(draw.fGeometry.isEmpty());
SkASSERT(SkIsPow2(fInfo.fShapeTypes));
fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();
- fInstancedRendering->fTrackedBatches.addToTail(this);
+ fInstancedRendering->fTrackedOps.addToTail(this);
fIsTracked = true;
}
-bool InstancedRendering::Batch::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
- Batch* that = static_cast<Batch*>(other);
+bool InstancedRendering::Op::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
+ Op* that = static_cast<Op*>(other);
SkASSERT(fInstancedRendering == that->fInstancedRendering);
SkASSERT(fTailDraw);
SkASSERT(that->fTailDraw);
- if (!BatchInfo::CanCombine(fInfo, that->fInfo) ||
- !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
- *that->pipeline(), that->bounds(), caps)) {
+ if (!OpInfo::CanCombine(fInfo, that->fInfo) ||
+ !GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
return false;
}
- BatchInfo combinedInfo = fInfo | that->fInfo;
+ OpInfo combinedInfo = fInfo | that->fInfo;
if (!combinedInfo.isSimpleRects()) {
// This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
// There seems to be a wide range where it doesn't matter if we combine or not. What matters
fInfo = combinedInfo;
fPixelLoad += that->fPixelLoad;
- // Adopt the other batch's draws.
+ // Adopt the other op's draws.
fNumDraws += that->fNumDraws;
fNumChangesInGeometry += that->fNumChangesInGeometry;
if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
SkASSERT(State::kRecordingDraws == fState);
fState = State::kFlushing;
- if (fTrackedBatches.isEmpty()) {
+ if (fTrackedOps.isEmpty()) {
return;
}
this->onBeginFlush(rp);
}
-void InstancedRendering::Batch::onDraw(GrOpFlushState* state, const SkRect& bounds) {
+void InstancedRendering::Op::onDraw(GrOpFlushState* state, const SkRect& bounds) {
SkASSERT(State::kFlushing == fInstancedRendering->fState);
SkASSERT(state->gpu() == fInstancedRendering->gpu());
}
void InstancedRendering::endFlush() {
- // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker
+ // The caller is expected to delete all tracked ops (i.e. ops whose initBatchTracker
// method has been called) before ending the flush.
- SkASSERT(fTrackedBatches.isEmpty());
+ SkASSERT(fTrackedOps.isEmpty());
fParams.reset();
fParamsBuffer.reset();
this->onEndFlush();
/**
* This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
- * instanced draws into one location, and creates special batches that pull from this data. The
- * nature of instanced rendering allows these batches to combine well and render efficiently.
+ * instanced draws into one location, and creates special ops that pull from this data. The nature
+ * of instanced rendering allows these ops to combine well and render efficiently.
*
* During a flush, this class assembles the accumulated draw data into a single vertex and texel
- * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
+ * buffer, and its subclass draws the ops using backend-specific instanced rendering APIs.
*
* This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
* InstanceProcessor.
GrGpu* gpu() const { return fGpu.get(); }
/**
- * These methods make a new record internally for an instanced draw, and return a batch that is
- * effectively just an index to that record. The returned batch is not self-contained, but
+ * These methods make a new record internally for an instanced draw, and return an op that is
+ * effectively just an index to that record. The returned op is not self-contained, but
* rather relies on this class to handle the rendering. The client must call beginFlush() on
- * this class before attempting to flush batches returned by it. It is invalid to record new
+ * this class before attempting to flush ops returned by it. It is invalid to record new
* draws between beginFlush() and endFlush().
*/
- GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
- GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, GrAA,
+ const GrInstancedPipelineInfo&, GrAAType*);
- GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
- const SkRect& localRect, GrAA,
- const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+ const SkRect& localRect, GrAA,
+ const GrInstancedPipelineInfo&, GrAAType*);
- GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
- const SkMatrix& localMatrix, GrAA,
- const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+ const SkMatrix& localMatrix, GrAA,
+ const GrInstancedPipelineInfo&, GrAAType*);
- GrDrawOp* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
- GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor, GrAA,
+ const GrInstancedPipelineInfo&, GrAAType*);
- GrDrawOp* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
- GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
+ GrAA, const GrInstancedPipelineInfo&,
+ GrAAType*);
- GrDrawOp* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
- const SkMatrix&, GrColor, GrAA,
- const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkMatrix&, GrColor, GrAA,
+ const GrInstancedPipelineInfo&, GrAAType*);
/**
* Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
- * batches created by this class.
+ * ops created by this class.
*/
void beginFlush(GrResourceProvider*);
/**
- * Called once the batches created previously by this class have all been released. Allows the
+ * Called once the ops created previously by this class have all been released. Allows the
* client to begin recording draws again.
*/
void endFlush();
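// A minimal sketch of the recording/flush protocol described above (illustrative only: "ir"
// names some InstancedRendering subclass instance, and the surrounding objects such as the
// GrResourceProvider and pipeline info are assumptions, not part of this header):
//
//     GrAAType aaType;
//     sk_sp<GrDrawOp> op = ir->recordRect(rect, viewMatrix, color, GrAA::kYes, pipelineInfo,
//                                         &aaType);
//     // ... record any additional shapes ...
//     ir->beginFlush(resourceProvider);  // compiles all recorded draws into GPU buffers
//     // ... the recorded ops are executed (and released) by their op list ...
//     ir->endFlush();                    // legal only once every tracked op has been released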
void resetGpuResources(ResetType);
protected:
- class Batch : public GrDrawOp {
+ class Op : public GrDrawOp {
public:
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Op);
- ~Batch() override;
- const char* name() const override { return "Instanced Batch"; }
+ ~Op() override;
+ const char* name() const override { return "InstancedRendering::Op"; }
SkString dumpInfo() const override {
SkString string;
void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
protected:
- Batch(uint32_t classID, InstancedRendering* ir);
+ Op(uint32_t classID, InstancedRendering* ir);
void initBatchTracker(const GrXPOverridesForBatch&) override;
bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
void onPrepare(GrOpFlushState*) override {}
void onDraw(GrOpFlushState*, const SkRect& bounds) override;
- InstancedRendering* const fInstancedRendering;
- BatchInfo fInfo;
- SkScalar fPixelLoad;
- SkSTArray<5, ParamsTexel, true> fParams;
- bool fIsTracked;
- int fNumDraws;
- int fNumChangesInGeometry;
- Draw* fHeadDraw;
- Draw* fTailDraw;
+ InstancedRendering* const fInstancedRendering;
+ OpInfo fInfo;
+ SkScalar fPixelLoad;
+ SkSTArray<5, ParamsTexel, true> fParams;
+ bool fIsTracked;
+ int fNumDraws;
+ int fNumChangesInGeometry;
+ Draw* fHeadDraw;
+ Draw* fTailDraw;
typedef GrDrawOp INHERITED;
friend class InstancedRendering;
};
- typedef SkTInternalLList<Batch> BatchList;
+ typedef SkTInternalLList<Op> OpList;
InstancedRendering(GrGpu* gpu);
- const BatchList& trackedBatches() const { return fTrackedBatches; }
+ const OpList& trackedOps() const { return fTrackedOps; }
const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }
virtual void onBeginFlush(GrResourceProvider*) = 0;
- virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
+ virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) = 0;
virtual void onEndFlush() = 0;
virtual void onResetGpuResources(ResetType) = 0;
kFlushing
};
- Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
- const SkMatrix& viewMatrix, GrColor,
- const SkRect& localRect, GrAA aa,
- const GrInstancedPipelineInfo&, GrAAType*);
+ sk_sp<Op> SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
+ const SkMatrix& viewMatrix, GrColor,
+ const SkRect& localRect, GrAA aa,
+ const GrInstancedPipelineInfo&, GrAAType*);
bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
GrAAType*, AntialiasMode*);
- virtual Batch* createBatch() = 0;
+ virtual sk_sp<Op> makeOp() = 0;
- const sk_sp<GrGpu> fGpu;
- State fState;
- GrObjectMemoryPool<Batch::Draw> fDrawPool;
- SkSTArray<1024, ParamsTexel, true> fParams;
- BatchList fTrackedBatches;
- sk_sp<const GrBuffer> fVertexBuffer;
- sk_sp<const GrBuffer> fIndexBuffer;
- sk_sp<GrBuffer> fParamsBuffer;
+ const sk_sp<GrGpu> fGpu;
+ State fState;
+ GrObjectMemoryPool<Op::Draw> fDrawPool;
+ SkSTArray<1024, ParamsTexel, true> fParams;
+ OpList fTrackedOps;
+ sk_sp<const GrBuffer> fVertexBuffer;
+ sk_sp<const GrBuffer> fIndexBuffer;
+ sk_sp<GrBuffer> fParamsBuffer;
};
}
GR_STATIC_ASSERT(4 * 4 == sizeof(ParamsTexel));
/**
- * Tracks all information needed in order to draw a batch of instances. This struct also serves
- * as an all-in-one shader key for the batch.
+ * Tracks all information needed in order to draw an op of instances. This struct also serves
+ * as an all-in-one shader key for the op.
*/
-struct BatchInfo {
- BatchInfo() : fData(0) {}
- explicit BatchInfo(uint32_t data) : fData(data) {}
+struct OpInfo {
+ OpInfo() : fData(0) {}
+ explicit OpInfo(uint32_t data) : fData(data) {}
- static bool CanCombine(const BatchInfo& a, const BatchInfo& b);
+ static bool CanCombine(const OpInfo& a, const OpInfo& b);
bool isSimpleRects() const {
return !((fShapeTypes & ~kRect_ShapeFlag) | fInnerShapeTypes);
};
};
-inline bool BatchInfo::CanCombine(const BatchInfo& a, const BatchInfo& b) {
+inline bool OpInfo::CanCombine(const OpInfo& a, const OpInfo& b) {
if (a.fAntialiasMode != b.fAntialiasMode) {
return false;
}
return true;
}
-inline BatchInfo operator|(const BatchInfo& a, const BatchInfo& b) {
- SkASSERT(BatchInfo::CanCombine(a, b));
- return BatchInfo(a.fData | b.fData);
+inline OpInfo operator|(const OpInfo& a, const OpInfo& b) {
+ SkASSERT(OpInfo::CanCombine(a, b));
+ return OpInfo(a.fData | b.fData);
}
// This is required since all the data must fit into 32 bits of a shader key.
-GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(BatchInfo));
+GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(OpInfo));
GR_STATIC_ASSERT(kNumShapeTypes <= 8);
struct IndexRange {