--- /dev/null
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "gm.h"
+#include "SkBitmap.h"
+#include "SkRandom.h"
+#include "SkShader.h"
+#include "SkXfermode.h"
+
+namespace skiagm {
+
+/**
+ * Renders overlapping shapes with hard light and src-over xfermodes against a solid background
+ * to exercise dst reads.
+ */
+class DstReadShuffle : public GM {
+public:
+ DstReadShuffle() {
+ this->setBGColor(SkColorSetARGB(0xff, 0xff, 0, 0xff));
+ }
+
+protected:
+ enum ShapeType {
+ kCircle_ShapeType,
+ kRoundRect_ShapeType,
+ kRect_ShapeType,
+ kConvexPath_ShapeType,
+ kConcavePath_ShapeType,
+ kText_ShapeType,
+ kNumShapeTypes
+ };
+
+ SkString onShortName() SK_OVERRIDE {
+ return SkString("dstreadshuffle");
+ }
+
+ SkISize onISize() SK_OVERRIDE {
+ return SkISize::Make(kWidth, kHeight);
+ }
+
+ void drawShape(SkCanvas* canvas,
+ SkPaint* paint,
+ ShapeType type) {
+ static const SkRect kRect = SkRect::MakeXYWH(SkIntToScalar(-50), SkIntToScalar(-50),
+ SkIntToScalar(75), SkIntToScalar(105));
+ switch (type) {
+ case kCircle_ShapeType:
+ canvas->drawCircle(0, 0, 50, *paint);
+ break;
+ case kRoundRect_ShapeType:
+ canvas->drawRoundRect(kRect, SkIntToScalar(10), SkIntToScalar(20), *paint);
+ break;
+ case kRect_ShapeType:
+ canvas->drawRect(kRect, *paint);
+ break;
+ case kConvexPath_ShapeType:
+ if (fConvexPath.isEmpty()) {
+ SkPoint points[4];
+ kRect.toQuad(points);
+ fConvexPath.moveTo(points[0]);
+ fConvexPath.quadTo(points[1], points[2]);
+ fConvexPath.quadTo(points[3], points[0]);
+ SkASSERT(fConvexPath.isConvex());
+ }
+ canvas->drawPath(fConvexPath, *paint);
+ break;
+ case kConcavePath_ShapeType:
+ if (fConcavePath.isEmpty()) {
+ SkPoint points[5] = {{0, SkIntToScalar(-50)} };
+ SkMatrix rot;
+ rot.setRotate(SkIntToScalar(360) / 5);
+ for (int i = 1; i < 5; ++i) {
+ rot.mapPoints(points + i, points + i - 1, 1);
+ }
+ fConcavePath.moveTo(points[0]);
+ for (int i = 0; i < 5; ++i) {
+ fConcavePath.lineTo(points[(2 * i) % 5]);
+ }
+ fConcavePath.setFillType(SkPath::kEvenOdd_FillType);
+ SkASSERT(!fConcavePath.isConvex());
+ }
+ canvas->drawPath(fConcavePath, *paint);
+ break;
+ case kText_ShapeType: {
+ const char* text = "Hello!";
+ paint->setTextSize(30);
+ canvas->drawText(text, strlen(text), 0, 0, *paint);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ static SkColor GetColor(SkRandom* random, int i) {
+ SkColor color;
+ switch (i) {
+ case 0:
+ color = SK_ColorTRANSPARENT;
+ break;
+ case 1:
+ color = SkColorSetARGB(0xff,
+ random->nextULessThan(256),
+ random->nextULessThan(256),
+ random->nextULessThan(256));
+ break;
+ default:
+ uint8_t alpha = random->nextULessThan(256);
+ color = SkColorSetARGB(alpha,
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha));
+ break;
+ }
+ return color;
+ }
+
+ static void SetStyle(SkPaint* p, int style, int width) {
+ switch (style) {
+ case 0:
+ p->setStyle(SkPaint::kStroke_Style);
+ p->setStrokeWidth((SkScalar)width);
+ break;
+ case 1:
+ p->setStyle(SkPaint::kStrokeAndFill_Style);
+ p->setStrokeWidth((SkScalar)width);
+ break;
+ default:
+ p->setStyle(SkPaint::kFill_Style);
+ break;
+ }
+ }
+
+ void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+ SkRandom random;
+ SkScalar y = 100;
+ for (int i = 0; i < kNumShapeTypes; i++) {
+ ShapeType shapeType = static_cast<ShapeType>(i);
+ SkScalar x = 25;
+ for (int style = 0; style < 3; style++) {
+ for (int width = 0; width <= 1; width++) {
+ for (int alpha = 0; alpha <= 2; alpha++) {
+ for (int r = 0; r <= 5; r++) {
+ SkColor color = GetColor(&random, alpha);
+
+ SkPaint p;
+ p.setAntiAlias(true);
+ p.setColor(color);
+ p.setXfermodeMode(r % 3 == 0 ? SkXfermode::kHardLight_Mode :
+ SkXfermode::kSrcOver_Mode);
+ SetStyle(&p, style, width);
+ canvas->save();
+ canvas->translate(x, y);
+ canvas->rotate((SkScalar)(r < 3 ? 10 : 0));
+ this->drawShape(canvas, &p, shapeType);
+ canvas->restore();
+ x += 8;
+ }
+ }
+ }
+ }
+ y += 50;
+ }
+ }
+
+private:
+ enum {
+ kNumShapes = 100,
+ };
+ SkAutoTUnref<SkShader> fBG;
+ SkPath fConcavePath;
+ SkPath fConvexPath;
+ static const int kWidth = 900;
+ static const int kHeight = 400;
+ typedef GM INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+static GM* MyFactory(void*) { return new DstReadShuffle; }
+static GMRegistry reg(MyFactory);
+
+}
'../gm/copyTo4444.cpp',
'../gm/cubicpaths.cpp',
'../gm/cmykjpeg.cpp',
+ '../gm/dstreadshuffle.cpp',
'../gm/degeneratesegments.cpp',
'../gm/dcshader.cpp',
'../gm/discard.cpp',
'../gm/drawlooper.cpp',
'../gm/dropshadowimagefilter.cpp',
'../gm/drrect.cpp',
+ '../gm/dstreadshuffle.cpp',
'../gm/etc1bitmap.cpp',
'../gm/extractbitmap.cpp',
'../gm/emboss.cpp',
'<(skia_src_path)/gpu/GrAllocator.h',
'<(skia_src_path)/gpu/GrAtlas.cpp',
'<(skia_src_path)/gpu/GrAtlas.h',
+ '<(skia_src_path)/gpu/GrBatch.cpp',
+ '<(skia_src_path)/gpu/GrBatch.h',
+ '<(skia_src_path)/gpu/GrBatchTarget.cpp',
+ '<(skia_src_path)/gpu/GrBatchTarget.h',
'<(skia_src_path)/gpu/GrBitmapTextContext.cpp',
'<(skia_src_path)/gpu/GrBitmapTextContext.h',
'<(skia_src_path)/gpu/GrBlend.cpp',
'<(skia_src_path)/gpu/GrFontScaler.cpp',
'<(skia_src_path)/gpu/GrFontScaler.h',
'<(skia_src_path)/gpu/GrGeometryBuffer.h',
- '<(skia_src_path)/gpu/GrGeometryData.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.cpp',
'<(skia_src_path)/gpu/GrGlyph.h',
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
*/
#include "GrAARectRenderer.h"
+#include "GrBatch.h"
+#include "GrBatchTarget.h"
+#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrGeometryProcessor.h"
#include "GrGpu.h"
///////////////////////////////////////////////////////////////////////////////
-namespace {
-// Should the coverage be multiplied into the color attrib or use a separate attrib.
-enum CoverageAttribType {
- kUseColor_CoverageAttribType,
- kUseCoverage_CoverageAttribType,
-};
-}
-
-static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelineBuilder,
- GrColor color,
- CoverageAttribType* type,
- const SkMatrix& localMatrix) {
- uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
- const GrGeometryProcessor* gp;
- if (pipelineBuilder.canTweakAlphaForCoverage()) {
- gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
- SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
- *type = kUseColor_CoverageAttribType;
- } else {
- flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
- gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
- GrColorIsOpaque(color));
- SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
- *type = kUseCoverage_CoverageAttribType;
- }
- return gp;
-}
-
static void set_inset_fan(SkPoint* pts, size_t stride,
const SkRect& r, SkScalar dx, SkScalar dy) {
pts->setRectFan(r.fLeft + dx, r.fTop + dy,
r.fRight - dx, r.fBottom - dy, stride);
}
-void GrAARectRenderer::reset() {
- SkSafeSetNull(fAAFillRectIndexBuffer);
- SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
- SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
-}
-
static const uint16_t gFillAARectIdx[] = {
0, 1, 5, 5, 4, 0,
1, 2, 6, 6, 5, 1,
static const int kVertsPerAAFillRect = 8;
static const int kNumAAFillRectsInIndexBuffer = 256;
+static const GrGeometryProcessor* create_fill_rect_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& localMatrix) {
+ uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
+ const GrGeometryProcessor* gp;
+ if (tweakAlphaForCoverage) {
+ gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
+ false, 0xff);
+ } else {
+ flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
+ gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
+ false, 0xff);
+ }
+ return gp;
+}
+
+class AAFillRectBatch : public GrBatch {
+public:
+ struct Geometry {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ SkRect fDevRect;
+ };
+
+ static GrBatch* Create(const Geometry& geometry, const GrIndexBuffer* indexBuffer) {
+ return SkNEW_ARGS(AAFillRectBatch, (geometry, indexBuffer));
+ }
+
+ const char* name() const SK_OVERRIDE { return "AAFillRectBatch"; }
+
+ void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ // When this is called on a batch, there is only one geometry bundle
+ if (!this->canTweakAlphaForCoverage() && GrColorIsOpaque(fGeoData[0].fColor)) {
+ out->setUnknownOpaqueFourComponents();
+ } else {
+ out->setUnknownFourComponents();
+ }
+ }
+
+ void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ if (this->canTweakAlphaForCoverage()) {
+ // uniform coverage
+ out->setKnownSingleComponent(0xff);
+ } else {
+ out->setUnknownSingleComponent();
+ }
+ }
+
+ void initBatchOpt(const GrBatchOpt& batchOpt) {
+ fBatchOpt = batchOpt;
+ }
+
+ void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
+ // Handle any color overrides
+ if (init.fColorIgnored) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ } else if (GrColor_ILLEGAL != init.fOverrideColor) {
+ fGeoData[0].fColor = init.fOverrideColor;
+ }
+
+ // setup batch properties
+ fBatch.fColorIgnored = init.fColorIgnored;
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
+ fBatch.fCoverageIgnored = init.fCoverageIgnored;
+ }
+
+ void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ SkMatrix localMatrix;
+ if (!this->viewMatrix().invert(&localMatrix)) {
+ SkDebugf("Cannot invert\n");
+ return;
+ }
+
+ const GrGeometryProcessor* gp = create_fill_rect_gp(canTweakAlphaForCoverage,
+ localMatrix);
+
+ batchTarget->initDraw(gp, pipeline);
+ gp->unref();
+
+ // TODO this is hacky, but the only way we have to initialize the GP is to use the
+ // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
+ // everywhere we can remove this nastiness
+ GrPipelineInfo init;
+ init.fColorIgnored = fBatch.fColorIgnored;
+ init.fOverrideColor = GrColor_ILLEGAL;
+ init.fCoverageIgnored = fBatch.fCoverageIgnored;
+ init.fUsesLocalCoords = this->usesLocalCoords();
+ gp->initBatchTracker(batchTarget->currentBatchTracker(), init);
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+ int instanceCount = fGeoData.count();
+ int vertexCount = kVertsPerAAFillRect * instanceCount;
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
+ vertexCount,
+ &vertexBuffer,
+ &firstVertex);
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ this->generateAAFillRectGeometry(vertices,
+ i * kVertsPerAAFillRect * vertexStride,
+ vertexStride,
+ args.fColor,
+ args.fViewMatrix,
+ args.fRect,
+ args.fDevRect,
+ canTweakAlphaForCoverage);
+ }
+
+ GrDrawTarget::DrawInfo drawInfo;
+ drawInfo.setPrimitiveType(kTriangles_GrPrimitiveType);
+ drawInfo.setStartVertex(0);
+ drawInfo.setStartIndex(0);
+ drawInfo.setVerticesPerInstance(kVertsPerAAFillRect);
+ drawInfo.setIndicesPerInstance(kIndicesPerAAFillRect);
+ drawInfo.adjustStartVertex(firstVertex);
+ drawInfo.setVertexBuffer(vertexBuffer);
+ drawInfo.setIndexBuffer(fIndexBuffer);
+
+ int maxInstancesPerDraw = kNumAAFillRectsInIndexBuffer;
+
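+ // The shared index buffer only holds indices for kNumAAFillRectsInIndexBuffer rects, so the
+ // instanced draw is issued in chunks of at most that many instances.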
+ while (instanceCount) {
+ drawInfo.setInstanceCount(SkTMin(instanceCount, maxInstancesPerDraw));
+ drawInfo.setVertexCount(drawInfo.instanceCount() * drawInfo.verticesPerInstance());
+ drawInfo.setIndexCount(drawInfo.instanceCount() * drawInfo.indicesPerInstance());
+
+ batchTarget->draw(drawInfo);
+
+ drawInfo.setStartVertex(drawInfo.startVertex() + drawInfo.vertexCount());
+ instanceCount -= drawInfo.instanceCount();
+ }
+ }
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+private:
+ AAFillRectBatch(const Geometry& geometry, const GrIndexBuffer* indexBuffer)
+ : fIndexBuffer(indexBuffer) {
+ this->initClassID<AAFillRectBatch>();
+ fGeoData.push_back(geometry);
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatchOpt.fCanTweakAlphaForCoverage; }
+ bool colorIgnored() const { return fBatch.fColorIgnored; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+
+ bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
+ AAFillRectBatch* that = t->cast<AAFillRectBatch>();
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ return false;
+ }
+
+ if (this->colorIgnored() != that->colorIgnored()) {
+ return false;
+ }
+
+ if (this->usesLocalCoords() != that->usesLocalCoords()) {
+ return false;
+ }
+
+ // We apply the view matrix to the rect points on the CPU. However, if the pipeline uses
+ // local coords then we can't batch rects that have different view matrices. We could upload
+ // the view matrix as a vertex attribute in that case, but we haven't investigated it.
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ fBatch.fColor = GrColor_ILLEGAL;
+ }
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ return true;
+ }
+
+ void generateAAFillRectGeometry(void* vertices,
+ uint32_t offset,
+ uint32_t vertexStride,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect,
+ bool tweakAlphaForCoverage) const {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) + offset;
+
+ SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
+ SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
+
+ SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
+
+ if (viewMatrix.rectStaysRect()) {
+ set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
+ set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
+ } else {
+ // compute transformed (1, 0) and (0, 1) vectors
+ SkVector vec[2] = {
+ { viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
+ { viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
+ };
+
+ vec[0].normalize();
+ vec[0].scale(SK_ScalarHalf);
+ vec[1].normalize();
+ vec[1].scale(SK_ScalarHalf);
+
+ // create the rotated rect
+ fan0Pos->setRectFan(rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, vertexStride);
+ viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
+
+ // Now create the inset points and then outset the original
+ // rotated points
+
+ // TL
+ *((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
+ // BL
+ *((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
+ // BR
+ *((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
+ // TR
+ *((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
+ }
+
+ // Make verts point to the first vertex color and then set all the color and coverage vertex
+ // attribute values.
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
+ }
+ }
+
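+ // For rects thinner than a pixel the inner fan never reaches full coverage, so scale the
+ // inner coverage (or the color, when coverage is folded into the color) down proportionally.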
+ int scale;
+ if (inset < SK_ScalarHalf) {
+ scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
+ SkASSERT(scale >= 0 && scale <= 255);
+ } else {
+ scale = 0xff;
+ }
+
+ verts += 4 * vertexStride;
+
+ float innerCoverage = GrNormalizeByteToFloat(scale);
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride +
+ sizeof(GrColor)) = innerCoverage;
+ }
+ }
+ }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ GrBatchOpt fBatchOpt;
+ BatchTracker fBatch;
+ const GrIndexBuffer* fIndexBuffer;
+ SkSTArray<1, Geometry, true> fGeoData;
+};
+
+namespace {
+// Should the coverage be multiplied into the color attrib or use a separate attrib.
+enum CoverageAttribType {
+ kUseColor_CoverageAttribType,
+ kUseCoverage_CoverageAttribType,
+};
+}
+
+void GrAARectRenderer::reset() {
+ SkSafeSetNull(fAAFillRectIndexBuffer);
+ SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
+ SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
+}
+
static const uint16_t gMiterStrokeAARectIdx[] = {
0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect) {
- GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
-
- SkMatrix localMatrix;
- if (!viewMatrix.invert(&localMatrix)) {
- SkDebugf("Cannot invert\n");
- return;
- }
-
- CoverageAttribType type;
- SkAutoTUnref<const GrGeometryProcessor> gp(create_rect_gp(*pipelineBuilder, color, &type,
- localMatrix));
-
- size_t vertexStride = gp->getVertexStride();
- GrDrawTarget::AutoReleaseGeometry geo(target, 8, vertexStride, 0);
- if (!geo.succeeded()) {
- SkDebugf("Failed to get space for vertices!\n");
- return;
- }
-
if (NULL == fAAFillRectIndexBuffer) {
fAAFillRectIndexBuffer = fGpu->createInstancedIndexBuffer(gFillAARectIdx,
kIndicesPerAAFillRect,
kNumAAFillRectsInIndexBuffer,
kVertsPerAAFillRect);
}
- GrIndexBuffer* indexBuffer = fAAFillRectIndexBuffer;
- if (NULL == indexBuffer) {
- SkDebugf("Failed to create index buffer!\n");
- return;
- }
-
- intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
-
- SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
- SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
-
- SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
- inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
-
- if (viewMatrix.rectStaysRect()) {
- // Temporarily #if'ed out. We don't want to pass in the devRect but
- // right now it is computed in GrContext::apply_aa_to_rect and we don't
- // want to throw away the work
-#if 0
- SkRect devRect;
- combinedMatrix.mapRect(&devRect, rect);
-#endif
-
- set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
- set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
- } else {
- // compute transformed (1, 0) and (0, 1) vectors
- SkVector vec[2] = {
- { viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
- { viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
- };
-
- vec[0].normalize();
- vec[0].scale(SK_ScalarHalf);
- vec[1].normalize();
- vec[1].scale(SK_ScalarHalf);
-
- // create the rotated rect
- fan0Pos->setRectFan(rect.fLeft, rect.fTop,
- rect.fRight, rect.fBottom, vertexStride);
- viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
-
- // Now create the inset points and then outset the original
- // rotated points
-
- // TL
- *((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
- // BL
- *((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
- // BR
- *((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
- // TR
- *((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
- }
- // Make verts point to vertex color and then set all the color and coverage vertex attrs values.
- verts += sizeof(SkPoint);
- for (int i = 0; i < 4; ++i) {
- if (kUseCoverage_CoverageAttribType == type) {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
- *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
- } else {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
- }
- }
+ AAFillRectBatch::Geometry geometry;
+ geometry.fRect = rect;
+ geometry.fViewMatrix = viewMatrix;
+ geometry.fDevRect = devRect;
+ geometry.fColor = color;
- int scale;
- if (inset < SK_ScalarHalf) {
- scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
- SkASSERT(scale >= 0 && scale <= 255);
- } else {
- scale = 0xff;
- }
-
- verts += 4 * vertexStride;
-
- float innerCoverage = GrNormalizeByteToFloat(scale);
- GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
-
- for (int i = 0; i < 4; ++i) {
- if (kUseCoverage_CoverageAttribType == type) {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
- *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = innerCoverage;
- } else {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
- }
- }
-
- target->setIndexSourceToBuffer(indexBuffer);
- target->drawIndexedInstances(pipelineBuilder,
- gp,
- kTriangles_GrPrimitiveType,
- 1,
- kVertsPerAAFillRect,
- kIndicesPerAAFillRect);
- target->resetIndexSource();
+ SkAutoTUnref<GrBatch> batch(AAFillRectBatch::Create(geometry, fAAFillRectIndexBuffer));
+ target->drawBatch(pipelineBuilder, batch, &devRect);
}
void GrAARectRenderer::strokeAARect(GrDrawTarget* target,
devOutsideAssist.outset(0, ry);
}
- this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside, devOutsideAssist,
- devInside, miterStroke);
+ this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside,
+ devOutsideAssist, devInside, miterStroke);
}
+static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelineBuilder,
+ GrColor color,
+ CoverageAttribType* type,
+ const SkMatrix& localMatrix) {
+ uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
+ const GrGeometryProcessor* gp;
+ if (pipelineBuilder.canTweakAlphaForCoverage()) {
+ gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
+ SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
+ *type = kUseColor_CoverageAttribType;
+ } else {
+ flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
+ gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
+ GrColorIsOpaque(color));
+ SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+ *type = kUseCoverage_CoverageAttribType;
+ }
+ return gp;
+}
+
void GrAARectRenderer::geometryStrokeAARect(GrDrawTarget* target,
GrPipelineBuilder* pipelineBuilder,
GrColor color,
const SkRect& devOutsideAssist,
const SkRect& devInside,
bool miterStroke) {
- GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
-
SkMatrix localMatrix;
if (!viewMatrix.invert(&localMatrix)) {
SkDebugf("Cannot invert\n");
--- /dev/null
+#include "GrBatch.h"
+
+#include "GrMemoryPool.h"
+#include "SkTLS.h"
+
+// TODO I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
+// but it seems to be mostly consistent. There is a lot in flux right now, but we should revisit
+// this when GrBatch is used everywhere.
+
+class GrBatch_Globals {
+public:
+ static GrMemoryPool* GetTLS() {
+ return (GrMemoryPool*)SkTLS::Get(CreateTLS, DeleteTLS);
+ }
+
+private:
+ static void* CreateTLS() {
+ return SkNEW_ARGS(GrMemoryPool, (16384, 16384));
+ }
+
+ static void DeleteTLS(void* pool) {
+ SkDELETE(reinterpret_cast<GrMemoryPool*>(pool));
+ }
+};
+
+int32_t GrBatch::gCurrBatchClassID =
+ GrBatch::kIllegalBatchClassID;
+
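+// GrBatch instances are allocated out of the per-thread GrMemoryPool above rather than the heap.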
+void* GrBatch::operator new(size_t size) {
+ return GrBatch_Globals::GetTLS()->allocate(size);
+}
+
+void GrBatch::operator delete(void* target) {
+ GrBatch_Globals::GetTLS()->release(target);
+}
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatch_DEFINED
+#define GrBatch_DEFINED
+
+#include <new>
+// TODO remove this header when we move entirely to batch
+#include "GrGeometryProcessor.h"
+#include "SkRefCnt.h"
+#include "SkThread.h"
+#include "SkTypes.h"
+
+class GrBatchTarget;
+class GrGpu;
+class GrIndexBufferAllocPool;
+class GrPipeline;
+class GrVertexBufferAllocPool;
+
+struct GrInitInvariantOutput;
+
+/*
+ * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
+ * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
+ * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
+ * subclasses complete freedom to decide how and what they can batch.
+ *
+ * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
+ * merged using combineIfPossible. When two batches merge, one takes on the union of the data
+ * and the other is left empty. The merged batch becomes responsible for drawing the data from both
+ * the original batches.
+ *
+ * If there are any possible optimizations which might require knowing more about the full state of
+ * the draw, e.g. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
+ * information will be communicated to the GrBatch prior to geometry generation.
+ */
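+
+/*
+ * Illustrative sketch only (MyRectBatch is a made-up name); a minimal subclass has roughly this
+ * shape. See AAFillRectBatch in GrAARectRenderer.cpp for a working example.
+ *
+ *   class MyRectBatch : public GrBatch {
+ *   public:
+ *       MyRectBatch() { this->initClassID<MyRectBatch>(); }  // class ID enables combining
+ *       const char* name() const SK_OVERRIDE { return "MyRectBatch"; }
+ *       void getInvariantOutputColor(GrInitInvariantOutput*) const SK_OVERRIDE;
+ *       void getInvariantOutputCoverage(GrInitInvariantOutput*) const SK_OVERRIDE;
+ *       void initBatchOpt(const GrBatchOpt&) SK_OVERRIDE;
+ *       void initBatchTracker(const GrPipelineInfo&) SK_OVERRIDE;
+ *       bool onCombineIfPossible(GrBatch* that) SK_OVERRIDE;  // merge 'that' into this batch
+ *       void generateGeometry(GrBatchTarget*, const GrPipeline*) SK_OVERRIDE;
+ *   };
+ */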
+
+struct GrBatchOpt {
+ bool fCanTweakAlphaForCoverage;
+};
+
+class GrBatch : public SkRefCnt {
+public:
+ SK_DECLARE_INST_COUNT(GrBatch)
+ GrBatch() { SkDEBUGCODE(fUsed = false;) }
+ virtual ~GrBatch() {}
+
+ virtual const char* name() const = 0;
+ virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
+ virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
+
+ /*
+ * initBatchOpt is used to communicate possible optimizations to the GrBatch. initBatchTracker
+ * is a hook for some additional overrides from the GrXferProcessor. This is a bit
+ * confusing but has to be like this until GrBatch is everywhere.
+ *
+ * TODO combine to a single init call when GrBatch is everywhere.
+ */
+ virtual void initBatchOpt(const GrBatchOpt&) = 0;
+ virtual void initBatchTracker(const GrPipelineInfo& init) = 0;
+
+ bool combineIfPossible(GrBatch* that) {
+ if (this->classID() != that->classID()) {
+ return false;
+ }
+
+ return onCombineIfPossible(that);
+ }
+
+ virtual bool onCombineIfPossible(GrBatch*) = 0;
+
+ virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;
+
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ /**
+ * Helper for down-casting to a GrBatch subclass
+ */
+ template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+ template <typename T> T* cast() { return static_cast<T*>(this); }
+
+ uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
+
+ // TODO no GrPrimitiveProcessors yet read fragment position
+ bool willReadFragmentPosition() const { return false; }
+
+ SkDEBUGCODE(bool isUsed() const { return fUsed; })
+
+protected:
+ template <typename PROC_SUBCLASS> void initClassID() {
+ static uint32_t kClassID = GenClassID();
+ fClassID = kClassID;
+ }
+
+ uint32_t fClassID;
+
+private:
+ static uint32_t GenClassID() {
+ // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
+ // atomic inc returns the old value not the incremented value. So we add
+ // 1 to the returned value.
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+ "subclass.");
+ }
+ return id;
+ }
+
+ enum {
+ kIllegalBatchClassID = 0,
+ };
+ static int32_t gCurrBatchClassID;
+
+ SkDEBUGCODE(bool fUsed;)
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchTarget.h"
+
+#include "GrBufferAllocPool.h"
+#include "GrPipeline.h"
+
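+// Replays everything recorded since the last flush: unmap the vertex/index pools so the data is
+// visible to the GPU, then build a program descriptor for each buffered flush and issue its draws.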
+void GrBatchTarget::flush() {
+ FlushBuffer::Iter iter(fFlushBuffer);
+ fVertexPool->unmap();
+ fIndexPool->unmap();
+
+ while (iter.next()) {
+ GrProgramDesc desc;
+ BufferedFlush* bf = iter.get();
+ const GrPipeline* pipeline = bf->fPipeline;
+ const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
+ fGpu->buildProgramDesc(&desc, *primProc, *pipeline, pipeline->descInfo(),
+ bf->fBatchTracker);
+
+ GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
+ for (int i = 0; i < bf->fDraws.count(); i++) {
+ fGpu->draw(args, bf->fDraws[i]);
+ }
+ }
+ fFlushBuffer.reset();
+}
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchTarget_DEFINED
+#define GrBatchTarget_DEFINED
+
+#include "GrPendingProgramElement.h"
+#include "GrGpu.h"
+#include "GrTRecorder.h"
+
+/*
+ * GrBatch instances use this object to allocate space for their geometry and to issue the draws
+ * that render their batch.
+ */
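+
+/*
+ * Rough usage sketch from inside a GrBatch::generateGeometry() implementation (variable names are
+ * illustrative; see AAFillRectBatch in GrAARectRenderer.cpp for a working example):
+ *
+ *   batchTarget->initDraw(gp, pipeline);
+ *   void* verts = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount,
+ *                                                      &vertexBuffer, &firstVertex);
+ *   // ... write vertex data and fill out a GrDrawTarget::DrawInfo ...
+ *   batchTarget->draw(drawInfo);
+ */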
+
+class GrBatchTarget : public SkNoncopyable {
+public:
+ GrBatchTarget(GrGpu* gpu,
+ GrVertexBufferAllocPool* vpool,
+ GrIndexBufferAllocPool* ipool)
+ : fGpu(gpu)
+ , fVertexPool(vpool)
+ , fIndexPool(ipool)
+ , fFlushBuffer(kFlushBufferInitialSizeInBytes) {}
+
+ typedef GrDrawTarget::DrawInfo DrawInfo;
+ void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
+ GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
+ }
+
+ void draw(const GrDrawTarget::DrawInfo& draw) {
+ fFlushBuffer.back().fDraws.push_back(draw);
+ }
+ void flush();
+
+ // TODO This goes away when everything uses batch
+ GrBatchTracker* currentBatchTracker() {
+ SkASSERT(!fFlushBuffer.empty());
+ return &fFlushBuffer.back().fBatchTracker;
+ }
+
+ GrVertexBufferAllocPool* vertexPool() { return fVertexPool; }
+ GrIndexBufferAllocPool* indexPool() { return fIndexPool; }
+
+private:
+ GrGpu* fGpu;
+ GrVertexBufferAllocPool* fVertexPool;
+ GrIndexBufferAllocPool* fIndexPool;
+
+ typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
+
+ struct BufferedFlush {
+ BufferedFlush(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline)
+ : fPrimitiveProcessor(primProc)
+ , fPipeline(pipeline)
+ , fDraws(kDrawRecorderInitialSizeInBytes) {}
+ typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
+ ProgramPrimitiveProcessor fPrimitiveProcessor;
+ const GrPipeline* fPipeline;
+ GrBatchTracker fBatchTracker;
+ SkSTArray<4, DrawInfo, true> fDraws;
+ };
+
+ enum {
+ kFlushBufferInitialSizeInBytes = 8 * sizeof(BufferedFlush),
+ kDrawRecorderInitialSizeInBytes = 8 * sizeof(DrawInfo),
+ };
+
+ typedef GrTRecorder<BufferedFlush, TBufferAlign> FlushBuffer;
+
+ FlushBuffer fFlushBuffer;
+};
+
+#endif
const Attribute* inCoverage() const { return fInCoverage; }
uint8_t coverage() const { return fCoverage; }
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
* found in the LICENSE file.
*/
-
-
#include "GrDrawTarget.h"
+
+#include "GrBatch.h"
#include "GrContext.h"
#include "GrDrawTargetCaps.h"
#include "GrPath.h"
}
}
+
+void GrDrawTarget::drawBatch(GrPipelineBuilder* pipelineBuilder,
+ GrBatch* batch,
+ const SkRect* devBounds) {
+ SkASSERT(pipelineBuilder);
+ // TODO some kind of checkdraw, but not at this level
+
+ // Setup clip
+ GrScissorState scissorState;
+ GrPipelineBuilder::AutoRestoreEffects are;
+ GrPipelineBuilder::AutoRestoreStencil ars;
+ if (!this->setupClip(pipelineBuilder, &are, &ars, &scissorState, devBounds)) {
+ return;
+ }
+
+ GrDeviceCoordTexture dstCopy;
+ if (!this->setupDstReadIfNecessary(pipelineBuilder, &dstCopy, devBounds)) {
+ return;
+ }
+
+ this->onDrawBatch(batch, *pipelineBuilder, scissorState, dstCopy.texture() ? &dstCopy : NULL);
+}
+
static const GrStencilSettings& winding_path_stencil_settings() {
GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
kIncClamp_StencilOp,
#include "SkTypes.h"
#include "SkXfermode.h"
+class GrBatch;
class GrClipData;
class GrDrawTargetCaps;
class GrPath;
int vertexCount,
const SkRect* devBounds = NULL);
+ // TODO devbounds should live on the batch
+ void drawBatch(GrPipelineBuilder*,
+ GrBatch*,
+ const SkRect* devBounds = NULL);
+
/**
* Draws path into the stencil buffer. The fill must be either even/odd or
* winding (not inverse or hairline). It will respect the HW antialias flag
* that rectangle before it is input to GrCoordTransforms that read local
* coordinates
*/
- void drawRect(GrPipelineBuilder* ds,
+ void drawRect(GrPipelineBuilder* pipelineBuilder,
GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect* localRect,
const SkMatrix* localMatrix) {
AutoGeometryPush agp(this);
- this->onDrawRect(ds, color, viewMatrix, rect, localRect, localMatrix);
+ this->onDrawRect(pipelineBuilder, color, viewMatrix, rect, localRect, localMatrix);
}
/**
*/
class DrawInfo {
public:
+ DrawInfo() { fDevBounds = NULL; }
DrawInfo(const DrawInfo& di) { (*this) = di; }
DrawInfo& operator =(const DrawInfo& di);
int indicesPerInstance() const { return fIndicesPerInstance; }
int instanceCount() const { return fInstanceCount; }
+ void setPrimitiveType(GrPrimitiveType type) { fPrimitiveType = type; }
+ void setStartVertex(int startVertex) { fStartVertex = startVertex; }
+ void setStartIndex(int startIndex) { fStartIndex = startIndex; }
+ void setVertexCount(int vertexCount) { fVertexCount = vertexCount; }
+ void setIndexCount(int indexCount) { fIndexCount = indexCount; }
+ void setVerticesPerInstance(int verticesPerI) { fVerticesPerInstance = verticesPerI; }
+ void setIndicesPerInstance(int indicesPerI) { fIndicesPerInstance = indicesPerI; }
+ void setInstanceCount(int instanceCount) { fInstanceCount = instanceCount; }
+
bool isIndexed() const { return fIndexCount > 0; }
#ifdef SK_DEBUG
bool isInstanced() const; // this version is longer because of asserts
const SkRect* getDevBounds() const { return fDevBounds; }
private:
- DrawInfo() { fDevBounds = NULL; }
-
friend class GrDrawTarget;
GrPrimitiveType fPrimitiveType;
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) = 0;
+ virtual void onDrawBatch(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy) = 0;
// TODO copy in order drawbuffer onDrawRect to here
virtual void onDrawRect(GrPipelineBuilder*,
GrColor color,
GrGpu* getGpu() { return fGpu; }
const GrGpu* getGpu() const{ return fGpu; }
+ GrVertexBufferAllocPool* getVertexAllocPool() { return fVertexPool; }
+ GrIndexBufferAllocPool* getIndexAllocPool() { return fIndexPool; }
+
private:
enum {
kGeoPoolStatePreAllocCnt = 4,
+++ /dev/null
-/*
- * Copyright 2014 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGeometryData_DEFINED
-#define GrGeometryData_DEFINED
-
-#include <new>
-#include "SkTypes.h"
-
-/*
- * A super lightweight base class for GeometryProcessor's to use to store draw data in a reorderable
- * fashion. Its most important feature is a pool allocator. Its virtual, but only so subclasses
- * will have their destructors called.
- */
-
-class GrGeometryData : SkNoncopyable {
-public:
- virtual ~GrGeometryData() {}
-
- /**
- * Helper for down-casting to a GrGeometryData subclass
- */
- template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
-
- void* operator new(size_t size);
-
- void operator delete(void* target);
-
- void* operator new(size_t size, void* placement) {
- return ::operator new(size, placement);
- }
-
- void operator delete(void* target, void* placement) {
- ::operator delete(target, placement);
- }
-};
-
-#endif
out->setKnownSingleComponent(0xff);
}
-void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
PathBatchTracker* local = bt->cast<PathBatchTracker>();
if (init.fColorIgnored) {
local->fInputColorType = kIgnored_GrGPInput;
#define GrGeometryProcessor_DEFINED
#include "GrColor.h"
-#include "GrGeometryData.h"
#include "GrProcessor.h"
#include "GrShaderVar.h"
/*
* A struct for tracking batching decisions. While this lives on GrOptState, it is managed
* entirely by the derived classes of the GP.
+ * TODO: this was an early attempt at handling out-of-order batching. It should be
+ * used carefully as it is being replaced by GrBatch.
*/
class GrBatchTracker {
public:
SkAlignedSStorage<kMaxSize> fData;
};
+class GrIndexBufferAllocPool;
class GrGLCaps;
class GrGLPrimitiveProcessor;
-class GrOptDrawState;
+class GrVertexBufferAllocPool;
struct GrInitInvariantOutput;
+/*
+ * This struct allows the GrPipeline to communicate information about the pipeline. Most of this
+ * is overrides, but some of it is general information. Logically it should live in GrPipeline.h,
+ * but this is problematic due to circular dependencies.
+ */
+struct GrPipelineInfo {
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ GrColor fOverrideColor;
+ bool fUsesLocalCoords;
+};
/*
* This enum is shared by GrPrimitiveProcessors and GrGLPrimitiveProcessors to coordinate shaders
const SkMatrix& viewMatrix() const { return fViewMatrix; }
const SkMatrix& localMatrix() const { return fLocalMatrix; }
- /*
- * This struct allows the optstate to communicate requirements to the GrPrimitiveProcessor.
- */
- struct InitBT {
- bool fColorIgnored;
- bool fCoverageIgnored;
- GrColor fOverrideColor;
- bool fUsesLocalCoords;
- };
-
- virtual void initBatchTracker(GrBatchTracker*, const InitBT&) const = 0;
+ virtual void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const = 0;
virtual bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,
* TODO this function changes quite a bit with deferred geometry. There the GrGeometryProcessor
* can upload a new color via attribute if needed.
*/
- static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor, const InitBT& init,
+ static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor,
+ const GrPipelineInfo& init,
bool hasVertexColor) {
if (init.fColorIgnored) {
*color = GrColor_ILLEGAL;
return SkNEW_ARGS(GrPathProcessor, (color, viewMatrix, localMatrix));
}
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,
: INHERITED(gpu, vertexPool, indexPool)
, fCmdBuffer(kCmdBufferInitialSizeInBytes)
, fPrevState(NULL)
- , fDrawID(0) {
+ , fDrawID(0)
+ , fBatchTarget(gpu, vertexPool, indexPool) {
SkASSERT(vertexPool);
SkASSERT(indexPool);
Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
if (!draw->fInfo.isInstanced() ||
+ draw->fInfo.primitiveType() != info.primitiveType() ||
draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
this->recordTraceMarkersIfNecessary();
}
+void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
+ if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
+ return;
+ }
+
+ // Check if there is a Batch Draw we can batch with
+ if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
+ GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+ return;
+ }
+
+ DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
+ if (draw->fBatch->combineIfPossible(batch)) {
+ return;
+ } else {
+ GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+ }
+ this->recordTraceMarkersIfNecessary();
+}
+
void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
const GrPathProcessor* pathProc,
const GrPath* path,
return;
}
-
CmdBuffer::Iter iter(fCmdBuffer);
int currCmdMarker = 0;
// stream.
SetState* currentState = NULL;
+ // TODO to prevent flushing the batch buffer too much, we only flush when wasBatch && !isBatch
+ // In the long term we can delete this and just flush once at the end of all geometry generation
+ bool wasBatch = false;
+
while (iter.next()) {
GrGpuTraceMarker newMarker("", -1);
SkString traceString;
++currCmdMarker;
}
- if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
+ bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);
+
+ if (!isSetState && kDrawBatch_Cmd != strip_trace_bit(iter->fType)) {
+ // TODO see note above, this gets deleted once everyone uses batch drawing
+ if (wasBatch) {
+ wasBatch = false;
+ fBatchTarget.flush();
+ }
+ }
+
+ if (isSetState) {
SetState* ss = reinterpret_cast<SetState*>(iter.get());
- this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline,
- ss->fPipeline.descInfo(), ss->fBatchTracker);
+ // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we will
+ // only have GrBatch and we can delete this
+ if (ss->fPrimitiveProcessor) {
+ this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
+ ss->fPipeline,
+ ss->fPipeline.descInfo(),
+ ss->fBatchTracker);
+ } else {
+ wasBatch = true;
+ }
currentState = ss;
-
} else {
iter->execute(this, currentState);
}
}
}
+ // TODO see note above, one last catch
+ if (wasBatch) {
+ fBatchTarget.flush();
+ }
+
SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
++fDrawID;
}
fCount, fStencilSettings);
}
+void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
+ SkASSERT(state);
+ fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
+}
+
void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}
void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
ss->fPipeline.getInitBatchTracker());
- if (fPrevState &&
+ if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*ss->fPrimitiveProcessor,
ss->fBatchTracker) &&
return true;
}
+bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrScissorState& scissor,
+ const GrDeviceCoordTexture* dstCopy) {
+ // TODO this gets much simpler when we have batches everywhere.
+ // If the previous recorded command is a SetState that was created for a batch (i.e. it has no
+ // primitive processor) and its pipeline is equal to the new one, we drop the new SetState and
+ // reuse the previous one.
+ SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
+ (batch, pipelineBuilder, *this->getGpu()->caps(), scissor,
+ dstCopy));
+ if (ss->fPipeline.mustSkip()) {
+ fCmdBuffer.pop_back();
+ return false;
+ }
+
+ batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());
+
+ if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
+ fPrevState->fPipeline.isEqual(ss->fPipeline)) {
+ fCmdBuffer.pop_back();
+ } else {
+ fPrevState = ss;
+ this->recordTraceMarkersIfNecessary();
+ }
+ return true;
+}
+
void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
SkASSERT(!fCmdBuffer.empty());
SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
#define GrInOrderDrawBuffer_DEFINED
#include "GrFlushToGpuDrawTarget.h"
+
+#include "GrBatch.h"
+#include "GrBatchTarget.h"
#include "GrPipeline.h"
#include "GrPath.h"
#include "GrTRecorder.h"
private:
typedef GrGpu::DrawArgs DrawArgs;
enum {
- kDraw_Cmd = 1,
- kStencilPath_Cmd = 2,
- kSetState_Cmd = 3,
- kClear_Cmd = 4,
- kCopySurface_Cmd = 5,
- kDrawPath_Cmd = 6,
- kDrawPaths_Cmd = 7,
+ kDraw_Cmd = 1,
+ kStencilPath_Cmd = 2,
+ kSetState_Cmd = 3,
+ kClear_Cmd = 4,
+ kCopySurface_Cmd = 5,
+ kDrawPath_Cmd = 6,
+ kDrawPaths_Cmd = 7,
+ kDrawBatch_Cmd = 8,
};
struct SetState;
// TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
struct SetState : public Cmd {
+ // TODO get rid of the prim proc version of this when we use batch everywhere
SetState(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor* primProc,
const GrDrawTargetCaps& caps,
const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
, fPrimitiveProcessor(primProc)
, fPipeline(pipelineBuilder, primProc, caps, scissor, dstCopy) {}
+ SetState(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
+ : Cmd(kSetState_Cmd)
+ , fPipeline(batch, pipelineBuilder, caps, scissor, dstCopy) {}
+
void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
GrBatchTracker fBatchTracker;
};
+ struct DrawBatch : public Cmd {
+ DrawBatch(GrBatch* batch) : Cmd(kDrawBatch_Cmd), fBatch(SkRef(batch)) {
+ SkASSERT(!batch->isUsed());
+ }
+
+ void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
+
+ // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
+ SkAutoTUnref<GrBatch> fBatch;
+ };
+
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
+ void onDrawBatch(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
void onDrawRect(GrPipelineBuilder*,
GrColor,
const SkMatrix& viewMatrix,
// Determines whether the current draw operation requires a new GrPipeline and if so
// records it. If the draw can be skipped false is returned and no new GrPipeline is
// recorded.
+ // TODO delete the primproc variant when we have batches everywhere
bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(const GrPipelineBuilder&,
const GrPrimitiveProcessor*,
const GrScissorState&,
const GrDeviceCoordTexture*);
+ bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture*);
+
// We lazily record clip changes in order to skip clips that have no effect.
void recordClipIfNecessary();
// Records any trace markers for a command after adding it to the buffer.
bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; }
+ GrBatchTarget* getBatchTarget() { return &fBatchTarget; }
+
// TODO: Use a single allocator for commands and records
enum {
kCmdBufferInitialSizeInBytes = 8 * 1024,
SkTDArray<char> fPathIndexBuffer;
SkTDArray<float> fPathTransformBuffer;
uint32_t fDrawID;
+ GrBatchTarget fBatchTarget;
typedef GrFlushToGpuDrawTarget INHERITED;
};
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
#include "GrPipeline.h"
+#include "GrBatch.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
#include "GrXferProcessor.h"
GrPipeline::GrPipeline(const GrPipelineBuilder& pipelineBuilder,
- const GrPrimitiveProcessor* primProc,
- const GrDrawTargetCaps& caps,
- const GrScissorState& scissorState,
- const GrDeviceCoordTexture* dstCopy) {
+ const GrPrimitiveProcessor* primProc,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(primProc);
const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(primProc);
+ this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
+}
+
+GrPipeline::GrPipeline(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
+ GrBatchOpt batchOpt;
+ batchOpt.fCanTweakAlphaForCoverage = pipelineBuilder.canTweakAlphaForCoverage();
+ batch->initBatchOpt(batchOpt);
+
+ const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(batch);
+ const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(batch);
+
+ this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
+}
+
+void GrPipeline::internalConstructor(const GrPipelineBuilder& pipelineBuilder,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
// Create XferProcessor from DS's XPFactory
SkAutoTUnref<GrXferProcessor> xferProcessor(
pipelineBuilder.getXPFactory()->createXferProcessor(colorPOI, coveragePOI));
#include "SkMatrix.h"
#include "SkRefCnt.h"
+class GrBatch;
class GrDeviceCoordTexture;
-class GrPathProcessor;
class GrPipelineBuilder;
/**
public:
SK_DECLARE_INST_COUNT(GrPipeline)
+ // TODO get rid of this version of the constructor when we use batch everywhere
GrPipeline(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor*,
const GrDrawTargetCaps&, const GrScissorState&,
const GrDeviceCoordTexture* dstCopy);
+ GrPipeline(GrBatch*, const GrPipelineBuilder&, const GrDrawTargetCaps&,
+ const GrScissorState&, const GrDeviceCoordTexture* dstCopy);
+
/*
* Returns true if it is possible to combine the two GrPipelines and it will update 'this'
* to subsume 'that''s draw.
const GrProgramDesc::DescInfo& descInfo() const { return fDescInfo; }
- const GrGeometryProcessor::InitBT& getInitBatchTracker() const { return fInitBT; }
+ const GrPipelineInfo& getInitBatchTracker() const { return fInitBT; }
private:
+ // TODO we can have one constructor once GrBatch is complete
+ void internalConstructor(const GrPipelineBuilder&,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ const GrDrawTargetCaps&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy);
+
/**
* Alter the program desc and inputs (attribs and processors) based on the blend optimization.
*/
RenderTarget fRenderTarget;
GrScissorState fScissorState;
GrStencilSettings fStencilSettings;
- GrPipelineBuilder::DrawFace fDrawFace;
+ GrPipelineBuilder::DrawFace fDrawFace;
GrDeviceCoordTexture fDstCopy;
uint32_t fFlags;
ProgramXferProcessor fXferProcessor;
FragmentStageArray fFragmentStages;
GrProgramDesc::DescInfo fDescInfo;
- GrGeometryProcessor::InitBT fInitBT;
+ GrPipelineInfo fInitBT;
// This function is equivalent to the offset into fFragmentStages where coverage stages begin.
int fNumColorStages;
, fColorProcInfoValid(false)
, fCoverageProcInfoValid(false)
, fColorCache(GrColor_ILLEGAL)
- , fCoverageCache(GrColor_ILLEGAL)
- , fColorPrimProc(NULL)
- , fCoveragePrimProc(NULL) {
+ , fCoverageCache(GrColor_ILLEGAL) {
SkDEBUGCODE(fBlockEffectRemovalCnt = 0;)
}
fCoverageProcInfoValid = that.fCoverageProcInfoValid;
fColorCache = that.fColorCache;
fCoverageCache = that.fCoverageCache;
- fColorPrimProc = that.fColorPrimProc;
- fCoveragePrimProc = that.fCoveragePrimProc;
if (fColorProcInfoValid) {
fColorProcInfo = that.fColorProcInfo;
}
fColorCache = GrColor_ILLEGAL;
fCoverageCache = GrColor_ILLEGAL;
-
- fColorPrimProc = NULL;
- fCoveragePrimProc = NULL;
}
////////////////////////////////////////////////////////////////////////////////
}
void GrPipelineBuilder::calcColorInvariantOutput(const GrPrimitiveProcessor* pp) const {
- if (!fColorProcInfoValid || fColorPrimProc != pp) {
- fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
- fColorProcInfoValid = true;
- fColorPrimProc = pp;
- }
+ fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
+ fColorProcInfoValid = false;
}
void GrPipelineBuilder::calcCoverageInvariantOutput(const GrPrimitiveProcessor* pp) const {
- if (!fCoverageProcInfoValid || fCoveragePrimProc != pp) {
- fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
- this->numCoverageStages());
- fCoverageProcInfoValid = true;
- fCoveragePrimProc = pp;
- }
+ fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
+ this->numCoverageStages());
+ fCoverageProcInfoValid = false;
}
+void GrPipelineBuilder::calcColorInvariantOutput(const GrBatch* batch) const {
+ fColorProcInfo.calcColorWithBatch(batch, fColorStages.begin(), this->numColorStages());
+ fColorProcInfoValid = false;
+}
+
+void GrPipelineBuilder::calcCoverageInvariantOutput(const GrBatch* batch) const {
+ fCoverageProcInfo.calcCoverageWithBatch(batch, fCoverageStages.begin(),
+ this->numCoverageStages());
+ fCoverageProcInfoValid = false;
+}
+
void GrPipelineBuilder::calcColorInvariantOutput(GrColor color) const {
if (!fColorProcInfoValid || color != fColorCache) {
GrColorComponentFlags flags = kRGBA_GrColorComponentFlags;
#ifndef GrPipelineBuilder_DEFINED
#define GrPipelineBuilder_DEFINED
-
+#include "GrBatch.h"
#include "GrBlend.h"
#include "GrDrawTargetCaps.h"
#include "GrGeometryProcessor.h"
GrPipelineBuilder& operator= (const GrPipelineBuilder& that);
private:
+ // Calculating invariant color / coverage information is expensive, so we partially cache the
+ // results.
+ //
+ // canUseFracCoveragePrimProc() - called during regular Skia draws; caches results, but only
+ // for a specific color and coverage. May be called multiple times.
+ // willBlendWithDst() - only called by NVPR; does not cache results.
+ // GrPipeline constructor - never caches results.
+
+ // TODO delete when we have Batch
const GrProcOptInfo& colorProcInfo(const GrPrimitiveProcessor* pp) const {
this->calcColorInvariantOutput(pp);
return fColorProcInfo;
return fCoverageProcInfo;
}
+ const GrProcOptInfo& colorProcInfo(const GrBatch* batch) const {
+ this->calcColorInvariantOutput(batch);
+ return fColorProcInfo;
+ }
+
+ const GrProcOptInfo& coverageProcInfo(const GrBatch* batch) const {
+ this->calcCoverageInvariantOutput(batch);
+ return fCoverageProcInfo;
+ }
+
/**
- * If fColorProcInfoValid is false, function calculates the invariant output for the color
- * stages and results are stored in fColorProcInfo.
+ * Primitive-processor variants of the calc functions.
+ * TODO: remove these once GrBatch is used everywhere.
*/
void calcColorInvariantOutput(const GrPrimitiveProcessor*) const;
+ void calcCoverageInvariantOutput(const GrPrimitiveProcessor*) const;
/**
- * If fCoverageProcInfoValid is false, function calculates the invariant output for the coverage
- * stages and results are stored in fCoverageProcInfo.
+ * GrBatch provides the initial seed for these calculations based on its initial geometry data.
*/
- void calcCoverageInvariantOutput(const GrPrimitiveProcessor*) const;
+ void calcColorInvariantOutput(const GrBatch*) const;
+ void calcCoverageInvariantOutput(const GrBatch*) const;
/**
* If fColorProcInfoValid is false, function calculates the invariant output for the color
mutable bool fCoverageProcInfoValid;
mutable GrColor fColorCache;
mutable GrColor fCoverageCache;
- mutable const GrPrimitiveProcessor* fColorPrimProc;
- mutable const GrPrimitiveProcessor* fCoveragePrimProc;
friend class GrPipeline;
};
#include "GrProcOptInfo.h"
+#include "GrBatch.h"
#include "GrFragmentProcessor.h"
#include "GrFragmentStage.h"
#include "GrGeometryProcessor.h"
+void GrProcOptInfo::calcColorWithBatch(const GrBatch* batch,
+ const GrFragmentStage* stages,
+ int stageCount) {
+ GrInitInvariantOutput out;
+ batch->getInvariantOutputColor(&out);
+ fInOut.reset(out);
+ this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
+}
+
+void GrProcOptInfo::calcCoverageWithBatch(const GrBatch* batch,
+ const GrFragmentStage* stages,
+ int stageCount) {
+ GrInitInvariantOutput out;
+ batch->getInvariantOutputCoverage(&out);
+ fInOut.reset(out);
+ this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
+}
+
void GrProcOptInfo::calcColorWithPrimProc(const GrPrimitiveProcessor* primProc,
const GrFragmentStage* stages,
int stageCount) {
#include "GrColor.h"
#include "GrInvariantOutput.h"
+class GrBatch;
class GrFragmentStage;
class GrFragmentProcessor;
class GrPrimitiveProcessor;
void calcWithInitialValues(const GrFragmentStage*, int stageCount, GrColor startColor,
GrColorComponentFlags flags, bool areCoverageStages);
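+ // GrBatch variants: the calculation is seeded from the batch's initial geometry data.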
+ void calcColorWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
+ void calcCoverageWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
+
+ // TODO delete these when batch is everywhere
void calcColorWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*, int stagecount);
void calcCoverageWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*,
int stagecount);
#include "GrProcessor.h"
#include "GrContext.h"
#include "GrCoordTransform.h"
-#include "GrGeometryData.h"
#include "GrGeometryProcessor.h"
#include "GrInvariantOutput.h"
#include "GrMemoryPool.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
-/*
- * GrGeometryData shares the same pool so it lives in this file too
- */
-void* GrGeometryData::operator new(size_t size) {
- return GrProcessor_Globals::GetTLS()->allocate(size);
-}
-
-void GrGeometryData::operator delete(void* target) {
- GrProcessor_Globals::GetTLS()->release(target);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
// Initial static variable from GrXPFactory
int32_t GrXPFactory::gCurrXPFClassID =
GrXPFactory::kIllegalXPFClassID;
return (ce.fEdgeType == fEdgeType);
}
-void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
ConicBatchTracker* local = bt->cast<ConicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
return (ce.fEdgeType == fEdgeType);
}
-void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
QuadBatchTracker* local = bt->cast<QuadBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
return (ce.fEdgeType == fEdgeType);
}
-void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
CubicBatchTracker* local = bt->cast<CubicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
return SkNEW_ARGS(GrGLBitmapTextGeoProc, (*this, bt));
}
-void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
BitmapTextBatchTracker* local = bt->cast<BitmapTextBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps& caps) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker&,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
fCenterX == dce.fCenterX);
}
-void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
DashingCircleBatchTracker* local = bt->cast<DashingCircleBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
fIntervalLength == de.fIntervalLength);
}
-void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
DashingLineBatchTracker* local = bt->cast<DashingLineBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
return SkNEW_ARGS(GrGLDistanceFieldTextureEffect, (*this, bt));
}
-void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt,
+                                                     const GrPipelineInfo& init) const {
DistanceFieldBatchTracker* local = bt->cast<DistanceFieldBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
}
void GrDistanceFieldNoGammaTextureEffect::initBatchTracker(GrBatchTracker* bt,
- const InitBT& init) const {
+ const GrPipelineInfo& init) const {
DistanceFieldNoGammaBatchTracker* local = bt->cast<DistanceFieldNoGammaBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
}
void GrDistanceFieldLCDTextureEffect::initBatchTracker(GrBatchTracker* bt,
- const InitBT& init) const {
+ const GrPipelineInfo& init) const {
DistanceFieldLCDBatchTracker* local = bt->cast<DistanceFieldLCDBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,