#include "GrProcessor.h"
class GrCoordTransform;
-class GrGLCaps;
-typedef GrGLCaps GrGLSLCaps;
+class GrGLSLCaps;
class GrGLFragmentProcessor;
class GrProcessorKeyBuilder;
#include "GrTypes.h"
#include "SkXfermode.h"
-class GrDrawTargetCaps;
-class GrGLCaps;
-typedef GrGLCaps GrGLSLCaps;
+class GrShaderCaps;
+class GrGLSLCaps;
class GrGLXferProcessor;
class GrProcOptInfo;
const GrDrawTargetCaps& caps,
GrTexture*[]) {
// Doesn't work without derivative instructions.
- return caps.shaderDerivativeSupport() ?
+ return caps.shaderCaps()->shaderDerivativeSupport() ?
QuadEdgeEffect::Create(GrRandomColor(random),
GrTest::TestMatrix(random)) : NULL;
}
const SkPath& path,
const GrStrokeInfo& stroke,
bool antiAlias) const {
- return (target->caps()->shaderDerivativeSupport() && antiAlias &&
+ return (target->caps()->shaderCaps()->shaderDerivativeSupport() && antiAlias &&
stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex());
}
// TODO: Support inverse fill
// TODO: Support strokes
- if (!target->caps()->shaderDerivativeSupport() || !antiAlias || path.isInverseFillType()
- || path.isVolatile() || !stroke.isFillStyle()) {
+ if (!target->caps()->shaderCaps()->shaderDerivativeSupport() || !antiAlias
+ || path.isInverseFillType() || path.isVolatile() || !stroke.isFillStyle()) {
return false;
}
}
if (SkPath::kLine_SegmentMask == path.getSegmentMasks() ||
- target->caps()->shaderDerivativeSupport()) {
+ target->caps()->shaderCaps()->shaderDerivativeSupport()) {
return true;
}
return false;
// rasterizers and mask filters modify alpha, which doesn't
// translate well to distance
if (skPaint.getRasterizer() || skPaint.getMaskFilter() ||
- !fContext->getTextTarget()->caps()->shaderDerivativeSupport()) {
+ !fContext->getTextTarget()->caps()->shaderCaps()->shaderDerivativeSupport()) {
return false;
}
const SkDeviceProperties&
leakyProperties,
bool enableDistanceFieldFonts) {
- if (fGpu->caps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
+ if (fGpu->caps()->shaderCaps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
GrStencilAttachment* sb = renderTarget->renderTargetPriv().attachStencilAttachment();
if (sb) {
return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties);
return 0;
}
int chosenSampleCount = 0;
- if (fGpu->caps()->pathRenderingSupport()) {
+ if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
if (dpi >= 250.0f) {
chosenSampleCount = 4;
} else {
int subPixelThresh = filter > GrTextureParams::kNone_FilterMode ? 4 : 1;
fPrecision = kDefault_GrSLPrecision;
if (texture->getContext()) {
- const GrDrawTargetCaps* caps = texture->getContext()->getGpu()->caps();
+ const GrShaderCaps* caps = texture->getContext()->getGpu()->caps()->shaderCaps();
if (caps->floatPrecisionVaries()) {
int maxD = SkTMax(texture->width(), texture->height());
- const GrDrawTargetCaps::PrecisionInfo* info;
+ const GrShaderCaps::PrecisionInfo* info;
info = &caps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, fPrecision);
do {
SkASSERT(info->supported());
GrPathRendering::FillType fill) {
// TODO: extract portions of checkDraw that are relevant to path stenciling.
SkASSERT(path);
- SkASSERT(this->caps()->pathRenderingSupport());
+ SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pipelineBuilder);
// Setup clip
GrPathRendering::FillType fill) {
// TODO: extract portions of checkDraw that are relevant to path rendering.
SkASSERT(path);
- SkASSERT(this->caps()->pathRenderingSupport());
+ SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pipelineBuilder);
SkRect devBounds = path->getBounds();
PathTransformType transformType,
int count,
GrPathRendering::FillType fill) {
- SkASSERT(this->caps()->pathRenderingSupport());
+ SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pathRange);
SkASSERT(indices);
SkASSERT(0 == reinterpret_cast<long>(indices) % GrPathRange::PathIndexSizeInBytes(indexType));
///////////////////////////////////////////////////////////////////////////////
+void GrShaderCaps::reset() {
+ fShaderDerivativeSupport = false;
+ fGeometryShaderSupport = false;
+ fPathRenderingSupport = false;
+ fDstReadInShaderSupport = false;
+ fDualSourceBlendingSupport = false;
+
+ fShaderPrecisionVaries = false;
+}
+
+GrShaderCaps& GrShaderCaps::operator=(const GrShaderCaps& other) {
+ fShaderDerivativeSupport = other.fShaderDerivativeSupport;
+ fGeometryShaderSupport = other.fGeometryShaderSupport;
+ fPathRenderingSupport = other.fPathRenderingSupport;
+ fDstReadInShaderSupport = other.fDstReadInShaderSupport;
+ fDualSourceBlendingSupport = other.fDualSourceBlendingSupport;
+
+ fShaderPrecisionVaries = other.fShaderPrecisionVaries;
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ fFloatPrecisions[s][p] = other.fFloatPrecisions[s][p];
+ }
+ }
+ return *this;
+}
+
+static const char* shader_type_to_string(GrShaderType type) {
+ switch (type) {
+ case kVertex_GrShaderType:
+ return "vertex";
+ case kGeometry_GrShaderType:
+ return "geometry";
+ case kFragment_GrShaderType:
+ return "fragment";
+ }
+ return "";
+}
+
+static const char* precision_to_string(GrSLPrecision p) {
+ switch (p) {
+ case kLow_GrSLPrecision:
+ return "low";
+ case kMedium_GrSLPrecision:
+ return "medium";
+ case kHigh_GrSLPrecision:
+ return "high";
+ }
+ return "";
+}
+
+SkString GrShaderCaps::dump() const {
+ SkString r;
+ static const char* gNY[] = { "NO", "YES" };
+ r.appendf("Shader Derivative Support : %s\n", gNY[fShaderDerivativeSupport]);
+ r.appendf("Geometry Shader Support : %s\n", gNY[fGeometryShaderSupport]);
+ r.appendf("Path Rendering Support : %s\n", gNY[fPathRenderingSupport]);
+ r.appendf("Dst Read In Shader Support : %s\n", gNY[fDstReadInShaderSupport]);
+ r.appendf("Dual Source Blending Support : %s\n", gNY[fDualSourceBlendingSupport]);
+
+ r.appendf("Shader Float Precisions (varies: %s):\n", gNY[fShaderPrecisionVaries]);
+
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ GrShaderType shaderType = static_cast<GrShaderType>(s);
+ r.appendf("\t%s:\n", shader_type_to_string(shaderType));
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ if (fFloatPrecisions[s][p].supported()) {
+ GrSLPrecision precision = static_cast<GrSLPrecision>(p);
+ r.appendf("\t\t%s: log_low: %d log_high: %d bits: %d\n",
+ precision_to_string(precision),
+ fFloatPrecisions[s][p].fLogRangeLow,
+ fFloatPrecisions[s][p].fLogRangeHigh,
+ fFloatPrecisions[s][p].fBits);
+ }
+ }
+ }
+
+ return r;
+}
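+
+// A caller that wants the complete capability listing can combine this with the
+// base GrDrawTargetCaps::dump(); a minimal sketch (the variable name and the
+// SkDebugf usage are illustrative only):
+//   SkDebugf("%s", drawTargetCaps.dump().c_str());
+//   SkDebugf("%s", drawTargetCaps.shaderCaps()->dump().c_str());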
+
+///////////////////////////////////////////////////////////////////////////////
+
void GrDrawTargetCaps::reset() {
fMipMapSupport = false;
fNPOTTextureTileSupport = false;
fTwoSidedStencilSupport = false;
fStencilWrapOpsSupport = false;
- fShaderDerivativeSupport = false;
- fGeometryShaderSupport = false;
- fDualSourceBlendingSupport = false;
- fPathRenderingSupport = false;
- fDstReadInShaderSupport = false;
fDiscardRenderTargetSupport = false;
fReuseScratchTextures = true;
fGpuTracingSupport = false;
fMaxTextureSize = 0;
fMaxSampleCount = 0;
- fShaderPrecisionVaries = false;
-
memset(fConfigRenderSupport, 0, sizeof(fConfigRenderSupport));
memset(fConfigTextureSupport, 0, sizeof(fConfigTextureSupport));
}
fNPOTTextureTileSupport = other.fNPOTTextureTileSupport;
fTwoSidedStencilSupport = other.fTwoSidedStencilSupport;
fStencilWrapOpsSupport = other.fStencilWrapOpsSupport;
- fShaderDerivativeSupport = other.fShaderDerivativeSupport;
- fGeometryShaderSupport = other.fGeometryShaderSupport;
- fDualSourceBlendingSupport = other.fDualSourceBlendingSupport;
- fPathRenderingSupport = other.fPathRenderingSupport;
- fDstReadInShaderSupport = other.fDstReadInShaderSupport;
fDiscardRenderTargetSupport = other.fDiscardRenderTargetSupport;
fReuseScratchTextures = other.fReuseScratchTextures;
fGpuTracingSupport = other.fGpuTracingSupport;
memcpy(fConfigRenderSupport, other.fConfigRenderSupport, sizeof(fConfigRenderSupport));
memcpy(fConfigTextureSupport, other.fConfigTextureSupport, sizeof(fConfigTextureSupport));
- fShaderPrecisionVaries = other.fShaderPrecisionVaries;
- for (int s = 0; s < kGrShaderTypeCount; ++s) {
- for (int p = 0; p < kGrSLPrecisionCount; ++p) {
- fFloatPrecisions[s][p] = other.fFloatPrecisions[s][p];
- }
- }
return *this;
}
return str;
}
-static const char* shader_type_to_string(GrShaderType type) {
- switch (type) {
- case kVertex_GrShaderType:
- return "vertex";
- case kGeometry_GrShaderType:
- return "geometry";
- case kFragment_GrShaderType:
- return "fragment";
- }
- return "";
-}
-
-static const char* precision_to_string(GrSLPrecision p) {
- switch (p) {
- case kLow_GrSLPrecision:
- return "low";
- case kMedium_GrSLPrecision:
- return "medium";
- case kHigh_GrSLPrecision:
- return "high";
- }
- return "";
-}
-
SkString GrDrawTargetCaps::dump() const {
SkString r;
static const char* gNY[] = {"NO", "YES"};
r.appendf("NPOT Texture Tile Support : %s\n", gNY[fNPOTTextureTileSupport]);
r.appendf("Two Sided Stencil Support : %s\n", gNY[fTwoSidedStencilSupport]);
r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
- r.appendf("Shader Derivative Support : %s\n", gNY[fShaderDerivativeSupport]);
- r.appendf("Geometry Shader Support : %s\n", gNY[fGeometryShaderSupport]);
- r.appendf("Dual Source Blending Support : %s\n", gNY[fDualSourceBlendingSupport]);
- r.appendf("Path Rendering Support : %s\n", gNY[fPathRenderingSupport]);
- r.appendf("Dst Read In Shader Support : %s\n", gNY[fDstReadInShaderSupport]);
r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
gNY[fConfigTextureSupport[i]]);
}
- r.appendf("Shader Float Precisions (varies: %s):\n", gNY[fShaderPrecisionVaries]);
-
- for (int s = 0; s < kGrShaderTypeCount; ++s) {
- GrShaderType shaderType = static_cast<GrShaderType>(s);
- r.appendf("\t%s:\n", shader_type_to_string(shaderType));
- for (int p = 0; p < kGrSLPrecisionCount; ++p) {
- if (fFloatPrecisions[s][p].supported()) {
- GrSLPrecision precision = static_cast<GrSLPrecision>(p);
- r.appendf("\t\t%s: log_low: %d log_high: %d bits: %d\n",
- precision_to_string(precision),
- fFloatPrecisions[s][p].fLogRangeLow,
- fFloatPrecisions[s][p].fLogRangeHigh,
- fFloatPrecisions[s][p].fBits);
- }
- }
- }
-
return r;
}
#include "SkRefCnt.h"
#include "SkString.h"
-/**
- * Represents the draw target capabilities.
- */
-class GrDrawTargetCaps : public SkRefCnt {
+class GrShaderCaps : public SkRefCnt {
public:
- SK_DECLARE_INST_COUNT(GrDrawTargetCaps)
+ SK_DECLARE_INST_COUNT(GrShaderCaps)
/** Info about shader variable precision within a given shader stage. That is, this info
is relevant to a float (or vecNf) variable declared with a GrSLPrecision
int fLogRangeLow;
/** floor(log2(|max_value|)) */
int fLogRangeHigh;
- /** Number of bits of precision. As defined in OpenGL (with names modified to reflect this
+ /** Number of bits of precision. As defined in OpenGL (with names modified to reflect this
struct) :
"""
- If the smallest representable value greater than 1 is 1 + e, then fBits will
- contain floor(log2(e)), and every value in the range [2^fLogRangeLow,
- 2^fLogRangeHigh] can be represented to at least one part in 2^fBits.
- """
+ If the smallest representable value greater than 1 is 1 + e, then fBits will
+             contain floor(-log2(e)), and every value in the range [2^fLogRangeLow,
+ 2^fLogRangeHigh] can be represented to at least one part in 2^fBits.
+ """
*/
int fBits;
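        // For example, an OpenGL ES "mediump" float commonly reports (typical values,
        // not guaranteed) fLogRangeLow == fLogRangeHigh == 14 and fBits == 10: the
        // smallest representable value greater than 1 is about 1 + 2^-10, and magnitudes
        // up to roughly 2^14 are kept to about one part in 2^10.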
};
+ GrShaderCaps() {
+ this->reset();
+ }
+ virtual ~GrShaderCaps() {}
+ GrShaderCaps(const GrShaderCaps& other) : INHERITED() {
+ *this = other;
+ }
+ GrShaderCaps& operator= (const GrShaderCaps&);
+
+ virtual void reset();
+ virtual SkString dump() const;
+
+ bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
+ bool geometryShaderSupport() const { return fGeometryShaderSupport; }
+ bool pathRenderingSupport() const { return fPathRenderingSupport; }
+ bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
+ bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }
+
+ /**
+ * Get the precision info for a variable of type kFloat_GrSLType, kVec2f_GrSLType, etc in a
+ * given shader type. If the shader type is not supported or the precision level is not
+ * supported in that shader type then the returned struct will report false when supported() is
+ * called.
+ */
+ const PrecisionInfo& getFloatShaderPrecisionInfo(GrShaderType shaderType,
+ GrSLPrecision precision) const {
+ return fFloatPrecisions[shaderType][precision];
+    }
+
+    /**
+     * Is there any difference between the float shader variable precision types? If this is
+     * false then, for supported shader types, any call to getFloatShaderPrecisionInfo() will
+     * report the same info for all precisions in all shader types.
+     */
+ bool floatPrecisionVaries() const { return fShaderPrecisionVaries; }
+
+protected:
+ bool fShaderDerivativeSupport : 1;
+ bool fGeometryShaderSupport : 1;
+ bool fPathRenderingSupport : 1;
+ bool fDstReadInShaderSupport : 1;
+ bool fDualSourceBlendingSupport : 1;
+
+ bool fShaderPrecisionVaries;
+ PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
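+// A minimal usage sketch of the precision table above (the helper name is
+// illustrative and not part of the Skia API): pick the lowest float precision
+// that the fragment stage supports, or the default when precisions don't vary.
+static inline GrSLPrecision pick_fragment_float_precision(const GrShaderCaps* caps) {
+    if (!caps->floatPrecisionVaries()) {
+        // Every precision behaves the same, so the default is as good as any.
+        return kDefault_GrSLPrecision;
+    }
+    for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+        GrSLPrecision precision = static_cast<GrSLPrecision>(p);
+        if (caps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, precision).supported()) {
+            return precision;
+        }
+    }
+    // Nothing reported as supported; fall back to the widest request.
+    return kHigh_GrSLPrecision;
+}
+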
+/**
+ * Represents the draw target capabilities.
+ */
+class GrDrawTargetCaps : public SkRefCnt {
+public:
+ SK_DECLARE_INST_COUNT(GrDrawTargetCaps)
GrDrawTargetCaps() {
+ fShaderCaps.reset(NULL);
this->reset();
}
GrDrawTargetCaps(const GrDrawTargetCaps& other) : INHERITED() {
virtual void reset();
virtual SkString dump() const;
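+    // Shader-stage capabilities (derivatives, geometry shaders, path rendering,
+    // dst reads, dual-source blending) are queried through the returned object,
+    // e.g. caps.shaderCaps()->shaderDerivativeSupport() (illustrative call).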
+ GrShaderCaps* shaderCaps() const { return fShaderCaps; }
+
bool npotTextureTileSupport() const { return fNPOTTextureTileSupport; }
/** To avoid as-yet-unnecessary complexity we don't allow any partial support of MIP Maps (e.g.
only for POT textures) */
bool mipMapSupport() const { return fMipMapSupport; }
bool twoSidedStencilSupport() const { return fTwoSidedStencilSupport; }
bool stencilWrapOpsSupport() const { return fStencilWrapOpsSupport; }
- bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
- bool geometryShaderSupport() const { return fGeometryShaderSupport; }
- bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }
- bool pathRenderingSupport() const { return fPathRenderingSupport; }
- bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
bool discardRenderTargetSupport() const { return fDiscardRenderTargetSupport; }
#if GR_FORCE_GPU_TRACE_DEBUGGING
bool gpuTracingSupport() const { return true; }
return fConfigTextureSupport[config];
}
- /**
- * Get the precision info for a variable of type kFloat_GrSLType, kVec2f_GrSLType, etc in a
- * given shader type. If the shader type is not supported or the precision level is not
- * supported in that shader type then the returned struct will report false when supported() is
- * called.
- */
- const PrecisionInfo& getFloatShaderPrecisionInfo(GrShaderType shaderType,
- GrSLPrecision precision) const {
- return fFloatPrecisions[shaderType][precision];
- };
-
- /**
- * Is there any difference between the float shader variable precision types? If this is true
- * then unless the shader type is not supported, any call to getFloatShaderPrecisionInfo() would
- * report the same info for all precisions in all shader types.
- */
- bool floatPrecisionVaries() const { return fShaderPrecisionVaries; }
-
protected:
+ SkAutoTUnref<GrShaderCaps> fShaderCaps;
+
bool fNPOTTextureTileSupport : 1;
bool fMipMapSupport : 1;
bool fTwoSidedStencilSupport : 1;
bool fStencilWrapOpsSupport : 1;
- bool fShaderDerivativeSupport : 1;
- bool fGeometryShaderSupport : 1;
- bool fDualSourceBlendingSupport : 1;
- bool fPathRenderingSupport : 1;
- bool fDstReadInShaderSupport : 1;
bool fDiscardRenderTargetSupport : 1;
bool fReuseScratchTextures : 1;
bool fGpuTracingSupport : 1;
bool fConfigRenderSupport[kGrPixelConfigCnt][2];
bool fConfigTextureSupport[kGrPixelConfigCnt];
- bool fShaderPrecisionVaries;
- PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];
-
private:
typedef SkRefCnt INHERITED;
};
if (SkScalarNearlyEqual(oval.width(), oval.height()) && circle_stays_circle(viewMatrix)) {
this->drawCircle(target, pipelineBuilder, color, viewMatrix, useCoverageAA, oval, stroke);
// if we have shader derivative support, render as device-independent
- } else if (target->caps()->shaderDerivativeSupport()) {
+ } else if (target->caps()->shaderCaps()->shaderDerivativeSupport()) {
return this->drawDIEllipse(target, pipelineBuilder, color, viewMatrix, useCoverageAA, oval,
stroke);
// otherwise axis-aligned ellipses only
};
class GrIndexBufferAllocPool;
-class GrGLCaps;
-typedef GrGLCaps GrGLSLCaps;
+class GrGLSLCaps;
class GrGLPrimitiveProcessor;
class GrVertexBufferAllocPool;
GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrContext* context) {
SkASSERT(context);
SkASSERT(context->getGpu());
- if (context->getGpu()->caps()->pathRenderingSupport()) {
+ if (context->getGpu()->caps()->shaderCaps()->pathRenderingSupport()) {
return SkNEW_ARGS(GrStencilAndCoverPathRenderer, (context->getGpu()));
} else {
return NULL;
}
GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrGpu* gpu) {
- SkASSERT(gpu->caps()->pathRenderingSupport());
+ SkASSERT(gpu->caps()->shaderCaps()->pathRenderingSupport());
fGpu = gpu;
gpu->ref();
}
const GrDrawTargetCaps& caps) const {
#ifdef SK_DEBUG
if (this->willReadDstColor(caps, colorPOI, coveragePOI)) {
- if (!caps.dstReadInShaderSupport()) {
+ if (!caps.shaderCaps()->dstReadInShaderSupport()) {
SkASSERT(dstCopy && dstCopy->texture());
} else {
SkASSERT(!dstCopy || !dstCopy->texture());
bool GrXPFactory::willNeedDstCopy(const GrDrawTargetCaps& caps, const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI) const {
- return (this->willReadDstColor(caps, colorPOI, coveragePOI) && !caps.dstReadInShaderSupport());
+ return (this->willReadDstColor(caps, colorPOI, coveragePOI)
+ && !caps.shaderCaps()->dstReadInShaderSupport());
}
uint8_t coverage = 0xff) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrConicEffect, (color, viewMatrix, coverage,
kFillAA_GrProcessorEdgeType,
localMatrix));
case kHairlineAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrConicEffect, (color, viewMatrix, coverage,
uint8_t coverage = 0xff) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrQuadEffect, (color, viewMatrix, coverage,
kFillAA_GrProcessorEdgeType,
localMatrix));
case kHairlineAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrQuadEffect, (color, viewMatrix, coverage,
const GrDrawTargetCaps& caps) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrCubicEffect, (color, viewMatrix, kFillAA_GrProcessorEdgeType));
case kHairlineAA_GrProcessorEdgeType:
- if (!caps.shaderDerivativeSupport()) {
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrCubicEffect, (color, viewMatrix,
// blending if we have any effective coverage stages OR the geometry processor doesn't emit
// solid coverage.
if (!(optFlags & kSetCoverageDrawing_OptFlag) && !hasSolidCoverage) {
- if (caps.dualSourceBlendingSupport()) {
+ if (caps.shaderCaps()->dualSourceBlendingSupport()) {
if (kZero_GrBlendCoeff == fDstBlend) {
// write the coverage value to second color
fSecondaryOutputType = kCoverage_SecondaryOutputType;
const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI) const {
// We can always blend correctly if we have dual source blending.
- if (caps.dualSourceBlendingSupport()) {
+ if (caps.shaderCaps()->dualSourceBlendingSupport()) {
return false;
}
fUseNonVBOVertexAndIndexDynamicData = false;
fIsCoreProfile = false;
fFullClearIsFree = false;
- fDropsTileOnZeroDivide = false;
- fFBFetchSupport = false;
- fFBFetchNeedsCustomOutput = false;
- fFBFetchColorName = NULL;
- fFBFetchExtensionString = NULL;
fFBMixedSamplesSupport = false;
fReadPixelsSupportedCache.reset();
+
+ fShaderCaps.reset(SkNEW(GrGLSLCaps));
}
GrGLCaps::GrGLCaps(const GrGLCaps& caps) : GrDrawTargetCaps() {
fUseNonVBOVertexAndIndexDynamicData = caps.fUseNonVBOVertexAndIndexDynamicData;
fIsCoreProfile = caps.fIsCoreProfile;
fFullClearIsFree = caps.fFullClearIsFree;
- fDropsTileOnZeroDivide = caps.fDropsTileOnZeroDivide;
- fFBFetchSupport = caps.fFBFetchSupport;
- fFBFetchNeedsCustomOutput = caps.fFBFetchNeedsCustomOutput;
- fFBFetchColorName = caps.fFBFetchColorName;
- fFBFetchExtensionString = caps.fFBFetchExtensionString;
fFBMixedSamplesSupport = caps.fFBMixedSamplesSupport;
+ *(reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get())) =
+ *(reinterpret_cast<GrGLSLCaps*>(caps.fShaderCaps.get()));
+
return *this;
}
fES2CompatibilitySupport = true;
}
- if (kGLES_GrGLStandard == standard) {
- if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
- fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
- fFBFetchSupport = true;
- fFBFetchColorName = "gl_LastFragData[0]";
- fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
- } else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
- // Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
- fFBFetchNeedsCustomOutput = false;
- fFBFetchSupport = true;
- fFBFetchColorName = "gl_LastFragData[0]";
- fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
- } else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
- // The arm extension also requires an additional flag which we will set onResetContext
- fFBFetchNeedsCustomOutput = false;
- fFBFetchSupport = true;
- fFBFetchColorName = "gl_LastFragColorARM";
- fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
- }
- }
-
- // Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
- fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();
-
this->initFSAASupport(ctxInfo, gli);
this->initStencilFormats(ctxInfo);
// attachment, hence this min:
fMaxRenderTargetSize = SkTMin(fMaxTextureSize, fMaxRenderTargetSize);
- fPathRenderingSupport = ctxInfo.hasExtension("GL_NV_path_rendering");
-
- if (fPathRenderingSupport) {
- if (kGL_GrGLStandard == standard) {
- // We only support v1.3+ of GL_NV_path_rendering which allows us to
- // set individual fragment inputs with ProgramPathFragmentInputGen. The API
- // additions are detected by checking the existence of the function.
- fPathRenderingSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access") &&
- ((ctxInfo.version() >= GR_GL_VER(4,3) ||
- ctxInfo.hasExtension("GL_ARB_program_interface_query")) &&
- gli->fFunctions.fProgramPathFragmentInputGen);
- } else {
- fPathRenderingSupport = ctxInfo.version() >= GR_GL_VER(3,1);
- }
- }
-
fFBMixedSamplesSupport = ctxInfo.hasExtension("GL_NV_framebuffer_mixed_samples");
fGpuTracingSupport = ctxInfo.hasExtension("GL_EXT_debug_marker");
- // For now these two are equivalent but we could have dst read in shader via some other method
- fDstReadInShaderSupport = fFBFetchSupport;
-
// Disable scratch texture reuse on Mali and Adreno devices
fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor() &&
kQualcomm_GrGLVendor != ctxInfo.vendor();
- // Enable supported shader-related caps
- if (kGL_GrGLStandard == standard) {
- fDualSourceBlendingSupport = ctxInfo.version() >= GR_GL_VER(3,3) ||
- ctxInfo.hasExtension("GL_ARB_blend_func_extended");
- fShaderDerivativeSupport = true;
- // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
- fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3,2) &&
- ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
- } else {
- fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
- ctxInfo.hasExtension("GL_OES_standard_derivatives");
- }
-
if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxSampleCount);
} else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {
this->initConfigTexturableTable(ctxInfo, gli);
this->initConfigRenderableTable(ctxInfo);
- this->initShaderPrecisionTable(ctxInfo, gli);
+ reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get())->init(ctxInfo, gli);
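+    // The cast is safe because reset() installs a newly allocated GrGLSLCaps
+    // into fShaderCaps (see GrGLCaps::reset() above).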
return true;
}
fStencilVerifiedColorConfigs.push_back_n(fStencilFormats.count());
}
-static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
- switch (p) {
- case kLow_GrSLPrecision:
- return GR_GL_LOW_FLOAT;
- case kMedium_GrSLPrecision:
- return GR_GL_MEDIUM_FLOAT;
- case kHigh_GrSLPrecision:
- return GR_GL_HIGH_FLOAT;
- }
- SkFAIL("Unknown precision.");
- return -1;
-}
-
-static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
- switch (type) {
- case kVertex_GrShaderType:
- return GR_GL_VERTEX_SHADER;
- case kGeometry_GrShaderType:
- return GR_GL_GEOMETRY_SHADER;
- case kFragment_GrShaderType:
- return GR_GL_FRAGMENT_SHADER;
- }
- SkFAIL("Unknown shader type.");
- return -1;
-}
-
-void GrGLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo, const GrGLInterface* intf) {
- if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4,1) ||
- ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
- for (int s = 0; s < kGrShaderTypeCount; ++s) {
- if (kGeometry_GrShaderType != s) {
- GrShaderType shaderType = static_cast<GrShaderType>(s);
- GrGLenum glShader = shader_type_to_gl_shader(shaderType);
- PrecisionInfo* first = NULL;
- fShaderPrecisionVaries = false;
- for (int p = 0; p < kGrSLPrecisionCount; ++p) {
- GrSLPrecision precision = static_cast<GrSLPrecision>(p);
- GrGLenum glPrecision = precision_to_gl_float_type(precision);
- GrGLint range[2];
- GrGLint bits;
- GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
- if (bits) {
- fFloatPrecisions[s][p].fLogRangeLow = range[0];
- fFloatPrecisions[s][p].fLogRangeHigh = range[1];
- fFloatPrecisions[s][p].fBits = bits;
- if (!first) {
- first = &fFloatPrecisions[s][p];
- } else if (!fShaderPrecisionVaries) {
- fShaderPrecisionVaries = (*first != fFloatPrecisions[s][p]);
- }
- }
- }
- }
- }
- } else {
- // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
- fShaderPrecisionVaries = false;
- for (int s = 0; s < kGrShaderTypeCount; ++s) {
- if (kGeometry_GrShaderType != s) {
- for (int p = 0; p < kGrSLPrecisionCount; ++p) {
- fFloatPrecisions[s][p].fLogRangeLow = 127;
- fFloatPrecisions[s][p].fLogRangeHigh = 127;
- fFloatPrecisions[s][p].fBits = 23;
- }
- }
- }
- }
- // GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
- // the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
- // highp. GS was added after GetShaderPrecisionFormat was added to the list of features that
- // are recommended against.
- if (fGeometryShaderSupport) {
- for (int p = 0; p < kGrSLPrecisionCount; ++p) {
- fFloatPrecisions[kGeometry_GrShaderType][p] = fFloatPrecisions[kVertex_GrShaderType][p];
- }
- }
-}
-
-
void GrGLCaps::markColorConfigAndStencilFormatAsVerified(
GrPixelConfig config,
const GrGLStencilAttachment::Format& format) {
r.appendf("Core Profile: %s\n", (fIsCoreProfile ? "YES" : "NO"));
r.appendf("MSAA Type: %s\n", kMSFBOExtStr[fMSFBOType]);
- r.appendf("FB Fetch Support: %s\n", (fFBFetchSupport ? "YES" : "NO"));
r.appendf("Invalidate FB Type: %s\n", kInvalidateFBTypeStr[fInvalidateFBType]);
r.appendf("Map Buffer Type: %s\n", kMapBufferTypeStr[fMapBufferType]);
r.appendf("Max FS Uniform Vectors: %d\n", fMaxFragmentUniformVectors);
r.appendf("Use non-VBO for dynamic data: %s\n",
(fUseNonVBOVertexAndIndexDynamicData ? "YES" : "NO"));
r.appendf("Full screen clear is free: %s\n", (fFullClearIsFree ? "YES" : "NO"));
+ return r;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLSLCaps::GrGLSLCaps() {
+ this->reset();
+}
+
+void GrGLSLCaps::reset() {
+ INHERITED::reset();
+
+ fDropsTileOnZeroDivide = false;
+ fFBFetchSupport = false;
+ fFBFetchNeedsCustomOutput = false;
+ fFBFetchColorName = NULL;
+ fFBFetchExtensionString = NULL;
+}
+
+GrGLSLCaps::GrGLSLCaps(const GrGLSLCaps& caps) : GrShaderCaps() {
+ *this = caps;
+}
+
+GrGLSLCaps& GrGLSLCaps::operator= (const GrGLSLCaps& caps) {
+ INHERITED::operator=(caps);
+ fDropsTileOnZeroDivide = caps.fDropsTileOnZeroDivide;
+ fFBFetchSupport = caps.fFBFetchSupport;
+ fFBFetchNeedsCustomOutput = caps.fFBFetchNeedsCustomOutput;
+ fFBFetchColorName = caps.fFBFetchColorName;
+ fFBFetchExtensionString = caps.fFBFetchExtensionString;
+
+ return *this;
+}
+
+bool GrGLSLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+ this->reset();
+ if (!ctxInfo.isInitialized()) {
+ return false;
+ }
+
+ GrGLStandard standard = ctxInfo.standard();
+ GrGLVersion version = ctxInfo.version();
+
+ /**************************************************************************
+ * Caps specific to GrGLSLCaps
+ **************************************************************************/
+
+ if (kGLES_GrGLStandard == standard) {
+ if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
+ fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
+ fFBFetchSupport = true;
+ fFBFetchColorName = "gl_LastFragData[0]";
+ fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
+        } else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
+ // Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
+ fFBFetchNeedsCustomOutput = false;
+ fFBFetchSupport = true;
+ fFBFetchColorName = "gl_LastFragData[0]";
+ fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
+        } else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
+ // The arm extension also requires an additional flag which we will set onResetContext
+ fFBFetchNeedsCustomOutput = false;
+ fFBFetchSupport = true;
+ fFBFetchColorName = "gl_LastFragColorARM";
+ fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
+ }
+ }
+
+ // Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
+ fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();
+
+ /**************************************************************************
+ * GrShaderCaps fields
+ **************************************************************************/
+
+ fPathRenderingSupport = ctxInfo.hasExtension("GL_NV_path_rendering");
+
+ if (fPathRenderingSupport) {
+ if (kGL_GrGLStandard == standard) {
+ // We only support v1.3+ of GL_NV_path_rendering which allows us to
+ // set individual fragment inputs with ProgramPathFragmentInputGen. The API
+ // additions are detected by checking the existence of the function.
+ fPathRenderingSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access") &&
+ ((ctxInfo.version() >= GR_GL_VER(4, 3) ||
+ ctxInfo.hasExtension("GL_ARB_program_interface_query")) &&
+ gli->fFunctions.fProgramPathFragmentInputGen);
+        } else {
+ fPathRenderingSupport = ctxInfo.version() >= GR_GL_VER(3, 1);
+ }
+ }
+
+ // For now these two are equivalent but we could have dst read in shader via some other method
+ fDstReadInShaderSupport = fFBFetchSupport;
+
+ // Enable supported shader-related caps
+ if (kGL_GrGLStandard == standard) {
+ fDualSourceBlendingSupport = ctxInfo.version() >= GR_GL_VER(3, 3) ||
+ ctxInfo.hasExtension("GL_ARB_blend_func_extended");
+ fShaderDerivativeSupport = true;
+ // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
+ fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3, 2) &&
+ ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
+    } else {
+ fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_standard_derivatives");
+ }
+
+ this->initShaderPrecisionTable(ctxInfo, gli);
+
+ return true;
+}
+
+SkString GrGLSLCaps::dump() const {
+ SkString r = INHERITED::dump();
+
+ r.appendf("--- GLSL-Specific ---\n");
+
+ r.appendf("FB Fetch Support: %s\n", (fFBFetchSupport ? "YES" : "NO"));
r.appendf("Drops tile on zero divide: %s\n", (fDropsTileOnZeroDivide ? "YES" : "NO"));
return r;
}
+
+static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
+ switch (p) {
+ case kLow_GrSLPrecision:
+ return GR_GL_LOW_FLOAT;
+ case kMedium_GrSLPrecision:
+ return GR_GL_MEDIUM_FLOAT;
+ case kHigh_GrSLPrecision:
+ return GR_GL_HIGH_FLOAT;
+ }
+ SkFAIL("Unknown precision.");
+ return -1;
+}
+
+static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
+ switch (type) {
+ case kVertex_GrShaderType:
+ return GR_GL_VERTEX_SHADER;
+ case kGeometry_GrShaderType:
+ return GR_GL_GEOMETRY_SHADER;
+ case kFragment_GrShaderType:
+ return GR_GL_FRAGMENT_SHADER;
+ }
+ SkFAIL("Unknown shader type.");
+ return -1;
+}
+
+void GrGLSLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* intf) {
+ if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4, 1) ||
+ ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ if (kGeometry_GrShaderType != s) {
+ GrShaderType shaderType = static_cast<GrShaderType>(s);
+ GrGLenum glShader = shader_type_to_gl_shader(shaderType);
+ PrecisionInfo* first = NULL;
+ fShaderPrecisionVaries = false;
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ GrSLPrecision precision = static_cast<GrSLPrecision>(p);
+ GrGLenum glPrecision = precision_to_gl_float_type(precision);
+ GrGLint range[2];
+ GrGLint bits;
+ GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
+ if (bits) {
+ fFloatPrecisions[s][p].fLogRangeLow = range[0];
+ fFloatPrecisions[s][p].fLogRangeHigh = range[1];
+ fFloatPrecisions[s][p].fBits = bits;
+ if (!first) {
+ first = &fFloatPrecisions[s][p];
+                    } else if (!fShaderPrecisionVaries) {
+ fShaderPrecisionVaries = (*first != fFloatPrecisions[s][p]);
+ }
+ }
+ }
+ }
+ }
+    } else {
+ // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
+ fShaderPrecisionVaries = false;
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ if (kGeometry_GrShaderType != s) {
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ fFloatPrecisions[s][p].fLogRangeLow = 127;
+ fFloatPrecisions[s][p].fLogRangeHigh = 127;
+ fFloatPrecisions[s][p].fBits = 23;
+ }
+ }
+ }
+ }
+ // GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
+ // the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
+ // highp. GS was added after GetShaderPrecisionFormat was added to the list of features that
+ // are recommended against.
+ if (fGeometryShaderSupport) {
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ fFloatPrecisions[kGeometry_GrShaderType][p] = fFloatPrecisions[kVertex_GrShaderType][p];
+ }
+ }
+}
+
#include "SkTArray.h"
class GrGLContextInfo;
+class GrGLSLCaps;
/**
* Stores some capabilities of a GL context. Most are determined by the GL
kES_EXT_MsToTexture_MSFBOType == fMSFBOType;
}
- /**
- * Some helper functions for encapsulating various extensions to read FB Buffer on openglES
- *
- * TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
- */
- bool fbFetchSupport() const { return fFBFetchSupport; }
-
- bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }
-
- const char* fbFetchColorName() const { return fFBFetchColorName; }
-
- const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }
-
bool fbMixedSamplesSupport() const { return fFBMixedSamplesSupport; }
InvalidateFBType invalidateFBType() const { return fInvalidateFBType; }
bool fullClearIsFree() const { return fFullClearIsFree; }
- bool dropsTileOnZeroDivide() const { return fDropsTileOnZeroDivide; }
-
/**
* Returns a string containing the caps info.
*/
LATCAlias latcAlias() const { return fLATCAlias; }
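+    // Convenience for reaching the GLSL-specific queries,
+    // e.g. gpu->glCaps().glslCaps()->fbFetchSupport() (illustrative call).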
+ GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }
+
private:
/**
* Maintains a bit per GrPixelConfig. It is used to avoid redundantly
void initConfigRenderableTable(const GrGLContextInfo&);
void initConfigTexturableTable(const GrGLContextInfo&, const GrGLInterface*);
- // Must be called after fGeometryShaderSupport is initialized.
- void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*);
-
bool doReadPixelsSupported(const GrGLInterface* intf, GrGLenum format, GrGLenum type) const;
// tracks configs that have been verified to pass the FBO completeness when
bool fUseNonVBOVertexAndIndexDynamicData : 1;
bool fIsCoreProfile : 1;
bool fFullClearIsFree : 1;
- bool fDropsTileOnZeroDivide : 1;
- bool fFBFetchSupport : 1;
- bool fFBFetchNeedsCustomOutput : 1;
bool fFBMixedSamplesSupport : 1;
- const char* fFBFetchColorName;
- const char* fFBFetchExtensionString;
-
struct ReadPixelsSupportedFormat {
GrGLenum fFormat;
GrGLenum fType;
typedef GrDrawTargetCaps INHERITED;
};
-typedef GrGLCaps GrGLSLCaps;
+
+class GrGLSLCaps : public GrShaderCaps {
+public:
+ SK_DECLARE_INST_COUNT(GrGLSLCaps)
+
+ /**
+ * Creates a GrGLSLCaps that advertises no support for any extensions,
+ * formats, etc. Call init to initialize from a GrGLContextInfo.
+ */
+ GrGLSLCaps();
+ ~GrGLSLCaps() override {}
+
+ GrGLSLCaps(const GrGLSLCaps& caps);
+
+ GrGLSLCaps& operator = (const GrGLSLCaps& caps);
+
+ /**
+ * Resets the caps such that nothing is supported.
+ */
+ void reset() override;
+
+ /**
+ * Initializes the GrGLSLCaps to the set of features supported in the current
+ * OpenGL context accessible via ctxInfo.
+ */
+ bool init(const GrGLContextInfo& ctxInfo, const GrGLInterface* glInterface);
+
+ /**
+ * Some helper functions for encapsulating various extensions to read FB Buffer on openglES
+ *
+ * TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
+ */
+ bool fbFetchSupport() const { return fFBFetchSupport; }
+
+ bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }
+
+ const char* fbFetchColorName() const { return fFBFetchColorName; }
+
+ const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }
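+
+    // Illustrative use: when fbFetchSupport() is true, a fragment shader can read
+    // the destination color in place via fbFetchColorName() (guarded by the
+    // extension named in fbFetchExtensionString()) instead of sampling a
+    // destination-copy texture.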
+
+ bool dropsTileOnZeroDivide() const { return fDropsTileOnZeroDivide; }
+
+ /**
+ * Returns a string containing the caps info.
+ */
+ SkString dump() const override;
+
+private:
+ // Must be called after fGeometryShaderSupport is initialized.
+ void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*);
+
+ bool fDropsTileOnZeroDivide : 1;
+ bool fFBFetchSupport : 1;
+ bool fFBFetchNeedsCustomOutput : 1;
+
+ const char* fFBFetchColorName;
+ const char* fFBFetchExtensionString;
+
+ typedef GrShaderCaps INHERITED;
+};
+
#endif
fTempDstFBOID = 0;
fStencilClearFBOID = 0;
- if (this->glCaps().pathRenderingSupport()) {
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
}
}
fTempSrcFBOID = 0;
fTempDstFBOID = 0;
fStencilClearFBOID = 0;
- if (this->glCaps().pathRenderingSupport()) {
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
this->glPathRendering()->abandonGpuResources();
}
}
}
if (resetBits & kPathRendering_GrGLBackendState) {
- if (this->caps()->pathRenderingSupport()) {
+ if (this->caps()->shaderCaps()->pathRenderingSupport()) {
this->glPathRendering()->resetContext();
}
}
const GrGLCaps& glCaps() const { return *fGLContext.caps(); }
GrGLPathRendering* glPathRendering() {
- SkASSERT(glCaps().pathRenderingSupport());
+ SkASSERT(glCaps().shaderCaps()->pathRenderingSupport());
return static_cast<GrGLPathRendering*>(pathRendering());
}
const void* indices, PathIndexType indexType,
const float transformValues[], PathTransformType transformType,
int count, const GrStencilSettings& stencilSettings) {
- SkASSERT(fGpu->caps()->pathRenderingSupport());
+ SkASSERT(fGpu->caps()->shaderCaps()->pathRenderingSupport());
GrGLuint baseID = static_cast<const GrGLPathRange*>(pathRange)->basePathID();
const SkISize& renderTargetSize,
GrSurfaceOrigin renderTargetOrigin) {
- SkASSERT(fGpu->glCaps().pathRenderingSupport());
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
GrProcessorKeyBuilder b(&glDesc->key());
- primProc.getGLProcessorKey(batchTracker, gpu->glCaps(), &b);
+ primProc.getGLProcessorKey(batchTracker, *gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(primProc, gpu->glCaps(), 0, &b)) {
glDesc->key().reset();
for (int s = 0; s < pipeline.numFragmentStages(); ++s) {
const GrPendingFragmentStage& fps = pipeline.getFragmentStage(s);
const GrFragmentProcessor& fp = *fps.processor();
- fp.getGLProcessorKey(gpu->glCaps(), &b);
+ fp.getGLProcessorKey(*gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(fp, gpu->glCaps(), primProc.getTransformKey(fp.coordTransforms()), &b)) {
glDesc->key().reset();
}
const GrXferProcessor& xp = *pipeline.getXferProcessor();
- xp.getGLProcessorKey(gpu->glCaps(), &b);
+ xp.getGLProcessorKey(*gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(xp, gpu->glCaps(), 0, &b)) {
glDesc->key().reset();
GrGLFragmentShaderBuilder::DstReadKey
GrGLFragmentShaderBuilder::KeyForDstRead(const GrTexture* dstCopy, const GrGLCaps& caps) {
uint32_t key = kYesDstRead_DstReadKeyBit;
- if (caps.fbFetchSupport()) {
+ if (caps.glslCaps()->fbFetchSupport()) {
return key;
}
SkASSERT(dstCopy);
switch (feature) {
case kStandardDerivatives_GLSLFeature: {
GrGLGpu* gpu = fProgramBuilder->gpu();
- if (!gpu->glCaps().shaderDerivativeSupport()) {
+ if (!gpu->glCaps().shaderCaps()->shaderDerivativeSupport()) {
return false;
}
if (kGLES_GrGLStandard == gpu->glStandard() &&
fHasReadDstColor = true;
GrGLGpu* gpu = fProgramBuilder->gpu();
- if (gpu->glCaps().fbFetchSupport()) {
+ if (gpu->glCaps().glslCaps()->fbFetchSupport()) {
this->addFeature(1 << (GrGLFragmentShaderBuilder::kLastGLSLPrivateFeature + 1),
- gpu->glCaps().fbFetchExtensionString());
+ gpu->glCaps().glslCaps()->fbFetchExtensionString());
// Some versions of this extension string require declaring custom color output on ES 3.0+
- const char* fbFetchColorName = gpu->glCaps().fbFetchColorName();
- if (gpu->glCaps().fbFetchNeedsCustomOutput()) {
+ const char* fbFetchColorName = gpu->glCaps().glslCaps()->fbFetchColorName();
+ if (gpu->glCaps().glslCaps()->fbFetchNeedsCustomOutput()) {
this->enableCustomOutput();
fOutputs[fCustomColorOutputIndex].setTypeModifier(GrShaderVar::kInOut_TypeModifier);
fbFetchColorName = declared_color_output_name();
GrGLProgramBuilder* GrGLProgramBuilder::CreateProgramBuilder(const DrawArgs& args,
GrGLGpu* gpu) {
if (args.fPrimitiveProcessor->isPathRendering()) {
- SkASSERT(gpu->glCaps().pathRenderingSupport() &&
+ SkASSERT(gpu->glCaps().shaderCaps()->pathRenderingSupport() &&
!args.fPrimitiveProcessor->willUseGeoShader() &&
args.fPrimitiveProcessor->numAttribs() == 0);
return SkNEW_ARGS(GrGLNvprProgramBuilder, (gpu, args));
fGeometryProcessor = SkNEW(GrGLInstalledGeoProc);
const GrBatchTracker& bt = this->batchTracker();
- fGeometryProcessor->fGLProc.reset(gp.createGLInstance(bt, fGpu->glCaps()));
+ fGeometryProcessor->fGLProc.reset(gp.createGLInstance(bt, *fGpu->glCaps().glslCaps()));
SkSTArray<4, GrGLProcessor::TextureSampler> samplers(gp.numTextures());
this->emitSamplers(gp, &samplers, fGeometryProcessor);
pipelineBuilder.setClip(clip);
// if path rendering we have to setup a couple of things like the draw type
- bool usePathRendering = gpu->glCaps().pathRenderingSupport() && random.nextBool();
+ bool usePathRendering = gpu->glCaps().shaderCaps()->pathRenderingSupport() &&
+ random.nextBool();
// twiddle drawstate knobs randomly
bool hasGeometryProcessor = !usePathRendering;