BezierCubicOrConicTestBatch(const GrGeometryProcessor* gp, const Geometry& geo,
const SkScalar klmEqs[9], SkScalar sign)
: INHERITED(gp, geo.fBounds) {
+ this->initClassID<BezierCubicOrConicTestBatch>();
for (int i = 0; i < 9; i++) {
fKlmEqs[i] = klmEqs[i];
}
: INHERITED(gp, geo.fBounds)
, fGeometry(geo)
, fDevToUV(devToUV) {
+ this->initClassID<BezierQuadTestBatch>();
}
struct Vertex {
// Constructor: forwards the geometry processor and the geometry's bounds to the
// batch base class (INHERITED), then stores a copy of the geometry for drawing.
ConvexPolyTestBatch(const GrGeometryProcessor* gp, const Geometry& geo)
: INHERITED(gp, geo.fBounds)
, fGeometry(geo) {
// Registers this concrete subclass's unique class ID so batch combining can
// reject batches of a different concrete type.
// NOTE(review): the leading '+' looks like leftover diff markup — confirm against VCS.
+ this->initClassID<ConvexPolyTestBatch>();
}
Geometry* geoData(int index) override {
#include <new>
#include "GrBatchTarget.h"
#include "GrGeometryProcessor.h"
+#include "GrNonAtomicRef.h"
#include "GrVertices.h"
#include "SkAtomics.h"
-#include "SkRefCnt.h"
#include "SkTypes.h"
class GrGpu;
* information will be communicated to the GrBatch prior to geometry generation.
*/
-class GrBatch : public SkRefCnt {
+class GrBatch : public GrNonAtomicRef {
public:
-
// Default constructor: starts with an illegal class ID (subclasses must call
// initClassID<>() to get a real one) and zero recorded draws; in debug builds,
// marks the batch as not yet used.
GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
// Virtual destructor: batches are deleted polymorphically through GrBatch*.
virtual ~GrBatch() {}
return false;
}
+ if (!this->pipeline()->isEqual(*that->pipeline())) {
+ return false;
+ }
+
return this->onCombineIfPossible(that);
}
// Debug-only query: whether this batch has already been consumed for drawing.
SkDEBUGCODE(bool isUsed() const { return fUsed; })
// Stores a reference-counted pointer to the pipeline on the batch itself
// (SkRef takes a ref; SkAutoTUnref releases the previous one, if any).
// NOTE(review): the leading '+' looks like leftover diff markup — confirm against VCS.
+ void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); }
+
protected:
// Assigns this batch a class ID unique to PROC_SUBCLASS. The function-local
// static runs GenClassID() exactly once per template instantiation, so every
// instance of a given concrete subclass shares the same ID — this is what
// combineIfPossible-style checks compare to gate batching by concrete type.
template <typename PROC_SUBCLASS> void initClassID() {
static uint32_t kClassID = GenClassID();
fClassID = kClassID;
}
- uint32_t fClassID;
-
// NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
// rect because we outset it for dst copy textures
// (Replaces fBounds wholesale; use joinBounds to accumulate instead.)
void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
return fBounds.joinPossiblyEmptyRect(otherBounds);
}
// Read-only access to the pipeline installed via setPipeline().
// NOTE(review): presumably NULL until setPipeline() is called — confirm; the
// leading '+' looks like leftover diff markup — confirm against VCS.
+ const GrPipeline* pipeline() const { return fPipeline; }
+
/** Helper for rendering instances using an instanced index index buffer. This class creates the
space for the vertices and flushes the draws to the batch target.*/
class InstancedHelper {
typedef InstancedHelper INHERITED;
};
+ uint32_t fClassID;
SkRect fBounds;
private:
enum {
kIllegalBatchClassID = 0,
};
+ SkAutoTUnref<const GrPipeline> fPipeline;
static int32_t gCurrBatchClassID;
-
- SkDEBUGCODE(bool fUsed;)
-
int fNumberOfDraws;
+ SkDEBUGCODE(bool fUsed;)
typedef SkRefCnt INHERITED;
};
#include "GrColor.h"
#include "GrGpu.h"
+#include "GrNonAtomicRef.h"
#include "GrPendingFragmentStage.h"
#include "GrPrimitiveProcessor.h"
#include "GrProgramDesc.h"
* Class that holds an optimized version of a GrPipelineBuilder. It is meant to be an immutable
* class, and contains all data needed to set the state for a gpu draw.
*/
-class GrPipeline {
+class GrPipeline : public GrNonAtomicRef {
public:
-
-
GrPipeline(const GrPipelineBuilder&,
const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI,
// Experimentally we have found that most batching occurs within the first 10 comparisons.
static const int kMaxLookback = 10;
int i = 0;
+ batch->setPipeline(state->getPipeline());
if (!this->cmdBuffer()->empty()) {
GrTargetCommands::CmdBuffer::ReverseIter reverseIter(*this->cmdBuffer());
if (Cmd::kDrawBatch_CmdType == reverseIter->type()) {
DrawBatch* previous = static_cast<DrawBatch*>(reverseIter.get());
- if (previous->fState->getPipeline()->isEqual(*state->getPipeline()) &&
- previous->fBatch->combineIfPossible(batch)) {
+ if (previous->fBatch->combineIfPossible(batch)) {
return NULL;
}