kPictureRecord_benchModes
};
+#ifdef SK_DEBUG
+static const bool kDebugOnly = true;
+#else
+static const bool kDebugOnly = false;
+#endif
+
///////////////////////////////////////////////////////////////////////////////
static void erase(SkBitmap& bm) {
#if SK_ANGLE
{ SkBitmap::kARGB_8888_Config, "ANGLE", 0, kGPU_Backend, GrContextFactory::kANGLE_GLContextType, true },
#endif // SK_ANGLE
-#ifdef SK_DEBUG
- { SkBitmap::kARGB_8888_Config, "Debug", 0, kGPU_Backend, GrContextFactory::kDebug_GLContextType, GR_DEBUG },
-#endif // SK_DEBUG
+ { SkBitmap::kARGB_8888_Config, "Debug", 0, kGPU_Backend, GrContextFactory::kDebug_GLContextType, kDebugOnly },
{ SkBitmap::kARGB_8888_Config, "NULLGPU", 0, kGPU_Backend, GrContextFactory::kNull_GLContextType, true },
#endif // SK_SUPPORT_GPU
};
#include "SkTileGridPicture.h"
#include "SamplePipeControllers.h"
+#ifdef SK_DEBUG
+static const bool kDebugOnly = true;
+#else
+static const bool kDebugOnly = false;
+#endif
+
__SK_FORCE_IMAGE_DECODER_LINKING;
#ifdef SK_BUILD_FOR_WIN
{ SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kNative_GLContextType, 4, kRW_ConfigFlag, "msaa4", false},
/* The gpudebug context does not generate meaningful images, so don't record
* the images it generates! We only run it to look for asserts. */
- { SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kDebug_GLContextType, 0, kNone_ConfigFlag, "gpudebug", GR_DEBUG},
+ { SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kDebug_GLContextType, 0, kNone_ConfigFlag, "gpudebug", kDebugOnly},
/* The gpunull context does the least amount of work possible and doesn't
   generate meaningful images, so don't record them! It can be run to
isolate the CPU-side processing expense from the GPU-side.
*/
- { SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kNull_GLContextType, 0, kNone_ConfigFlag, "gpunull", GR_DEBUG},
+ { SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kNull_GLContextType, 0, kNone_ConfigFlag, "gpunull", kDebugOnly},
#if SK_ANGLE
{ SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kANGLE_GLContextType, 0, kRW_ConfigFlag, "angle", true },
{ SkBitmap::kARGB_8888_Config, kGPU_Backend, GrContextFactory::kANGLE_GLContextType, 16, kRW_ConfigFlag, "anglemsaa16", true },
'Debug': {
'defines': [
'SK_DEBUG',
- 'GR_DEBUG=1',
'SK_DEVELOPER=1',
],
},
#endif
#endif
-// we need both GR_DEBUG and GR_RELEASE to be defined as 0 or 1
-//
-#ifndef GR_DEBUG
- #ifdef GR_RELEASE
- #define GR_DEBUG !GR_RELEASE
- #else
- #ifdef NDEBUG
- #define GR_DEBUG 0
- #else
- #define GR_DEBUG 1
- #endif
+#if !defined(SK_DEBUG) && !GR_RELEASE
+ #ifdef NDEBUG
+ #define GR_RELEASE 1
#endif
#endif
-#ifndef GR_RELEASE
- #define GR_RELEASE !GR_DEBUG
-#endif
-
-#if GR_DEBUG == GR_RELEASE
- #error "GR_DEBUG and GR_RELEASE must not be the same"
+#if defined(SK_DEBUG) && GR_RELEASE
+ #error "cannot define both SK_DEBUG and GR_RELEASE"
#endif
///////////////////////////////////////////////////////////////////////////////
 * An alternate user config file can be specified by defining
* GR_USER_CONFIG_FILE. It should be defined relative to GrConfig.h
*
- * e.g. it can specify GR_DEBUG/GR_RELEASE as it please, change the BUILD
- * target, or supply its own defines for anything else (e.g. GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT)
+ * e.g. it can change the BUILD target or supply its own defines for anything
+ * else (e.g. GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT)
*/
#if !defined(GR_USER_CONFIG_FILE)
#include "GrUserConfig.h"
* GR_DEBUGBREAK is an unconditional break in debug builds.
*/
#if !defined(GR_DEBUGBREAK)
- #if GR_DEBUG
+ #ifdef SK_DEBUG
#define GR_DEBUGBREAK GR_ALWAYSBREAK
#else
#define GR_DEBUGBREAK
* GR_DEBUGASSERT is an assertion in debug builds only.
*/
#if !defined(GR_DEBUGASSERT)
- #if GR_DEBUG
+ #ifdef SK_DEBUG
#define GR_DEBUGASSERT(COND) GR_ALWAYSASSERT(COND)
#else
#define GR_DEBUGASSERT(COND)
* GR_DEBUGCODE compiles the code X in debug builds only
*/
#if !defined(GR_DEBUGCODE)
- #if GR_DEBUG
+ #ifdef SK_DEBUG
#define GR_DEBUGCODE(X) X
#else
#define GR_DEBUGCODE(X)
return false;
}
bool result = this->onIsEqual(other);
-#if GR_DEBUG
+#ifdef SK_DEBUG
if (result) {
SkASSERT(this->numTextures() == other.numTextures());
for (int i = 0; i < this->numTextures(); ++i) {
bool isWrapped() const { return kWrapped_Flag & fFlags; }
private:
-#if GR_DEBUG
+#ifdef SK_DEBUG
friend class GrGpu; // for assert in GrGpu to access getGpu
#endif
EffectKey effectKey = GLEffect::GenKey(drawEffect, caps);
EffectKey textureKey = GLEffect::GenTextureKey(drawEffect, caps);
EffectKey attribKey = GLEffect::GenAttribKey(drawEffect);
-#if GR_DEBUG
+#ifdef SK_DEBUG
static const EffectKey kIllegalIDMask = (uint16_t) (~((1U << kEffectKeyBits) - 1));
SkASSERT(!(kIllegalIDMask & effectKey));
*/
virtual void invalidateCachedState() = 0;
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const {
this->INHERITED::validate();
SkShader::TileMode tileXAndY = SkShader::kClamp_TileMode);
bool operator== (const GrTextureAccess& other) const {
-#if GR_DEBUG
+#ifdef SK_DEBUG
// below assumes all chars in fSwizzle are initialized even if string is < 4 chars long.
SkASSERT(memcmp(fSwizzle, other.fSwizzle, sizeof(fSwizzle)-1) ==
strcmp(fSwizzle, other.fSwizzle));
*/
typedef int32_t GrFixed;
-#if GR_DEBUG
+#ifdef SK_DEBUG
static inline int16_t GrToS16(intptr_t x) {
SkASSERT((int16_t)x == x);
-
/*
* Copyright 2010 Google Inc.
*
* found in the LICENSE file.
*/
-
#ifndef GrUserConfig_DEFINED
#define GrUserConfig_DEFINED
#error "default user config pulled in but GR_USER_CONFIG_FILE is defined."
#endif
-#if 0
- #undef GR_RELEASE
- #undef GR_DEBUG
- #define GR_RELEASE 0
- #define GR_DEBUG 1
-#endif
-
/**
* This gives a threshold in bytes of when to lock a GrGeometryBuffer vs using
* updateData. (Note the depending on the underlying 3D API the update functions
#include "SkRegion.h"
#include "SkClipStack.h"
-#if (GR_DEBUG && defined(SK_RELEASE)) || (GR_RELEASE && defined(SK_DEBUG))
-// #error "inconsistent GR_DEBUG and SK_DEBUG"
+#if (GR_RELEASE && defined(SK_DEBUG))
+// #error "inconsistent GR_RELEASE and SK_DEBUG"
#endif
////////////////////////////////////////////////////////////////////////////////
* GR_GL_LOG_CALLS is 1. Defaults to 0.
*
* GR_GL_CHECK_ERROR: if enabled Gr can do a glGetError() after every GL call.
- * Defaults to 1 if GR_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
+ * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
* this can be toggled in a debugger using the gCheckErrorGL global. The initial
 * value of gCheckErrorGL is controlled by GR_GL_CHECK_ERROR_START.
*
*/
#if !defined(GR_GL_LOG_CALLS)
- #define GR_GL_LOG_CALLS GR_DEBUG
+ #ifdef SK_DEBUG
+ #define GR_GL_LOG_CALLS 1
+ #else
+ #define GR_GL_LOG_CALLS 0
+ #endif
#endif
#if !defined(GR_GL_LOG_CALLS_START)
#endif
#if !defined(GR_GL_CHECK_ERROR)
- #define GR_GL_CHECK_ERROR GR_DEBUG
+ #ifdef SK_DEBUG
+ #define GR_GL_CHECK_ERROR 1
+ #else
+ #define GR_GL_CHECK_ERROR 0
+ #endif
#endif
#if !defined(GR_GL_CHECK_ERROR_START)
devBounds.outset(SK_Scalar1, SK_Scalar1);
// Check devBounds
-#if GR_DEBUG
+#ifdef SK_DEBUG
SkRect tolDevBounds = devBounds;
tolDevBounds.outset(SK_Scalar1 / 10000, SK_Scalar1 / 10000);
SkRect actualBounds;
// Takes 178th time of logf on Z600 / VC2010
int get_float_exp(float x) {
GR_STATIC_ASSERT(sizeof(int) == sizeof(float));
-#if GR_DEBUG
+#ifdef SK_DEBUG
static bool tested;
if (!tested) {
tested = true;
fAAStrokeRectIndexBuffer =
gpu->createIndexBuffer(sizeof(gStrokeAARectIdx), false);
if (NULL != fAAStrokeRectIndexBuffer) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool updated =
#endif
fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
}
}
-
-#if GR_DEBUG
+#ifdef SK_DEBUG
void GrAllocPool::validate() const {
Block* block = fBlock;
Block* fBlock;
size_t fMinBlockSize;
-#if GR_DEBUG
+#ifdef SK_DEBUG
int fBlocksAllocated;
void validate() const;
#else
#define BORDER 1
-#if GR_DEBUG
+#ifdef SK_DEBUG
static int gCounter;
#endif
fMaskFormat = format;
-#if GR_DEBUG
+#ifdef SK_DEBUG
// GrPrintf(" GrAtlas %p [%d %d] %d\n", this, plotX, plotY, gCounter);
gCounter += 1;
#endif
delete fRects;
-#if GR_DEBUG
+#ifdef SK_DEBUG
--gCounter;
// GrPrintf("~GrAtlas %p [%d %d] %d\n", this, fPlot.fX, fPlot.fY, gCounter);
#endif
void reset() {
fHash = 0;
-#if GR_DEBUG
+#ifdef SK_DEBUG
fIsValid = false;
#endif
}
hash += (fHash << 3);
hash ^= (fHash >> 11);
hash += (fHash << 15);
-#if GR_DEBUG
+#ifdef SK_DEBUG
fIsValid = true;
#endif
fHash = hash;
uint32_t fHash;
uint8_t fData[KEY_SIZE]; // Buffer for key storage
-#if GR_DEBUG
+#ifdef SK_DEBUG
public:
bool fIsValid;
#endif
#include "GrTypes.h"
#include "GrVertexBuffer.h"
-#if GR_DEBUG
+#ifdef SK_DEBUG
#define VALIDATE validate
#else
static void VALIDATE(bool = false) {}
VALIDATE();
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
if (NULL != fBufferPtr) {
SkASSERT(!fBlocks.empty());
bool createBlock(size_t requestSize);
void destroyBlock();
void flushCpuData(GrGeometryBuffer* buffer, size_t flushSize);
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate(bool unusedBlockAllowed = false) const;
#endif
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1
-#if GR_DEBUG
+#ifdef SK_DEBUG
// change this to a 1 to see notifications when partial coverage fails
#define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
*useVertexCoverage = false;
if (!target->getDrawState().canTweakAlphaForCoverage()) {
if (disable_coverage_aa_for_blend(target)) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
//GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
return false;
// aa. If we have some future driver-mojo path AA that can do the right
// thing WRT to the blend then we'll need some query on the PR.
if (disable_coverage_aa_for_blend(target)) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
//GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
useAA = false;
}
if (NULL == pr) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
return;
static size_t vertex_size(const GrVertexAttrib* attribs, int count) {
// this works as long as we're 4 byte-aligned
-#if GR_DEBUG
+#ifdef SK_DEBUG
uint32_t overlapCheck = 0;
#endif
SkASSERT(count <= GrDrawState::kMaxVertexAttribCnt);
for (int index = 0; index < count; ++index) {
size_t attribSize = GrVertexAttribTypeSize(attribs[index].fType);
size += attribSize;
-#if GR_DEBUG
+#ifdef SK_DEBUG
size_t dwordCount = attribSize >> 2;
uint32_t mask = (1 << dwordCount)-1;
size_t offsetShift = attribs[index].fOffset >> 2;
memset(fCommon.fFixedFunctionVertexAttribIndices,
0xff,
sizeof(fCommon.fFixedFunctionVertexAttribIndices));
-#if GR_DEBUG
+#ifdef SK_DEBUG
uint32_t overlapCheck = 0;
#endif
for (int i = 0; i < count; ++i) {
GrVertexAttribTypeVectorCount(attribs[i].fType));
fCommon.fFixedFunctionVertexAttribIndices[attribs[i].fBinding] = i;
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
size_t dwordCount = GrVertexAttribTypeSize(attribs[i].fType) >> 2;
uint32_t mask = (1 << dwordCount)-1;
size_t offsetShift = attribs[i].fOffset >> 2;
void setBlendFunc(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
fCommon.fSrcBlend = srcCoeff;
fCommon.fDstBlend = dstCoeff;
- #if GR_DEBUG
+ #ifdef SK_DEBUG
if (GrBlendCoeffRefsDst(dstCoeff)) {
GrPrintf("Unexpected dst blend coeff. Won't work correctly with coverage stages.\n");
}
return *this;
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool GrDrawTarget::DrawInfo::isInstanced() const {
if (fInstanceCount > 0) {
SkASSERT(0 == fIndexCount % fIndicesPerInstance);
// We assume that fDrawState always owns a ref to the object it points at.
fDefaultDrawState.ref();
GeometrySrcState& geoSrc = fGeoSrcStateStack.push_back();
-#if GR_DEBUG
+#ifdef SK_DEBUG
geoSrc.fVertexCount = DEBUG_INVAL_START_IDX;
geoSrc.fVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
geoSrc.fIndexCount = DEBUG_INVAL_START_IDX;
break;
case kBuffer_GeometrySrcType:
geoSrc.fVertexBuffer->unref();
-#if GR_DEBUG
+#ifdef SK_DEBUG
geoSrc.fVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
#endif
break;
break;
case kBuffer_GeometrySrcType:
geoSrc.fIndexBuffer->unref();
-#if GR_DEBUG
+#ifdef SK_DEBUG
geoSrc.fIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
#endif
break;
GeometrySrcState& newState = fGeoSrcStateStack.push_back();
newState.fIndexSrc = kNone_GeometrySrcType;
newState.fVertexSrc = kNone_GeometrySrcType;
-#if GR_DEBUG
+#ifdef SK_DEBUG
newState.fVertexCount = ~0;
newState.fVertexBuffer = (GrVertexBuffer*)~0;
newState.fIndexCount = ~0;
int startIndex, int vertexCount,
int indexCount) const {
const GrDrawState& drawState = this->getDrawState();
-#if GR_DEBUG
+#ifdef SK_DEBUG
const GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
int maxVertex = startVertex + vertexCount;
int maxValidVertex;
SkIRect drawIBounds;
if (info->getDevIBounds(&drawIBounds)) {
if (!copyRect.intersect(drawIBounds)) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrPrintf("Missed an early reject. Bailing on draw from setupDstReadIfNecessary.\n");
#endif
return false;
}
} else {
-#if GR_DEBUG
+#ifdef SK_DEBUG
//GrPrintf("No dev bounds when dst copy is made.\n");
#endif
}
int instanceCount() const { return fInstanceCount; }
bool isIndexed() const { return fIndexCount > 0; }
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool isInstanced() const; // this version is longer because of asserts
#else
bool isInstanced() const { return fInstanceCount > 0; }
fClipMaskManager.setGpu(this);
fGeomPoolStateStack.push_back();
-#if GR_DEBUG
+#ifdef SK_DEBUG
GeometryPoolState& poolState = fGeomPoolStateStack.back();
poolState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
poolState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
void GrGpu::unimpl(const char msg[]) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
#endif
}
this->finalizeReservedIndices();
}
GeometryPoolState& newState = fGeomPoolStateStack.push_back();
-#if GR_DEBUG
+#ifdef SK_DEBUG
newState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
newState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
newState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
void GrGpu::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
this->prepareVertexPool();
GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool success =
#endif
fVertexPool->appendVertices(this->getVertexSize(),
void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
this->prepareIndexPool();
GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool success =
#endif
fIndexPool->appendIndices(indexCount,
glInterfaceUnref.reset(glInterface);
}
if (NULL == glInterface) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrPrintf("No GL interface provided!\n");
#endif
return NULL;
GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
poolState.fUsedPoolVertexBytes = 0;
poolState.fUsedPoolIndexBytes = 0;
-#if GR_DEBUG
+#ifdef SK_DEBUG
poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
poolState.fPoolStartVertex = ~0;
poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
GeometryPoolState& poolState = fGeoPoolStateStack.back();
SkASSERT(0 == poolState.fUsedPoolVertexBytes);
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool success =
#endif
fVertexPool.appendVertices(this->getVertexSize(),
int indexCount) {
GeometryPoolState& poolState = fGeoPoolStateStack.back();
SkASSERT(0 == poolState.fUsedPoolIndexBytes);
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool success =
#endif
fIndexPool.appendIndices(indexCount,
GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
poolState.fUsedPoolVertexBytes = 0;
poolState.fUsedPoolIndexBytes = 0;
-#if GR_DEBUG
+#ifdef SK_DEBUG
poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
poolState.fPoolStartVertex = ~0;
poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
#include "GrMemoryPool.h"
-#if GR_DEBUG
+#ifdef SK_DEBUG
#define VALIDATE this->validate()
#else
#define VALIDATE
size_t fMinAllocSize;
BlockHeader* fHead;
BlockHeader* fTail;
-#if GR_DEBUG
+#ifdef SK_DEBUG
int fAllocationCnt;
#endif
};
fRRectIndexBuffer =
gpu->createIndexBuffer(sizeof(gRRectIndices), false);
if (NULL != fRRectIndexBuffer) {
-#if GR_DEBUG
+#ifdef SK_DEBUG
bool updated =
#endif
fRRectIndexBuffer->updateData(gRRectIndices,
int onCountOf(const Node* n, const T& t) const;
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const;
int checkNode(Node* n, int* blackHeight) const;
// checks relationship between a node and its children. allowRedRed means
}
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
template <typename T, typename C>
void GrRedBlackTree<T,C>::validate() const {
if (fCount) {
fResource->unref();
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
void GrResourceEntry::validate() const {
SkASSERT(fResource);
SkASSERT(fResource->getCacheEntry() == this);
this->internalDetach(entry, kIgnore_BudgetBehavior);
fCache.remove(entry->key(), entry);
-#if GR_DEBUG
+#ifdef SK_DEBUG
fExclusiveList.addToHead(entry);
#endif
}
void GrResourceCache::makeNonExclusive(GrResourceEntry* entry) {
GrAutoResourceCacheValidate atcv(this);
-#if GR_DEBUG
+#ifdef SK_DEBUG
fExclusiveList.remove(entry);
#endif
fMaxCount = 0;
this->purgeAsNeeded();
-#if GR_DEBUG
+#ifdef SK_DEBUG
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
SkASSERT(countBytes(fExclusiveList) == fClientDetachedBytes);
if (!fCache.count()) {
///////////////////////////////////////////////////////////////////////////////
-#if GR_DEBUG
+#ifdef SK_DEBUG
size_t GrResourceCache::countBytes(const EntryList& list) {
size_t bytes = 0;
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
}
-#endif // GR_DEBUG
+#endif // SK_DEBUG
#if GR_CACHE_STATS
GrResource* resource() const { return fResource; }
const GrResourceKey& key() const { return fKey; }
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const;
#else
void validate() const {}
*/
void purgeAsNeeded(int extraCount = 0, size_t extraBytes = 0);
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const;
#else
void validate() const {}
typedef SkTInternalLList<GrResourceEntry> EntryList;
EntryList fList;
-#if GR_DEBUG
+#ifdef SK_DEBUG
// These objects cannot be returned by a search
EntryList fExclusiveList;
#endif
void internalPurge(int extraCount, size_t extraBytes);
-#if GR_DEBUG
+#ifdef SK_DEBUG
static size_t countBytes(const SkTInternalLList<GrResourceEntry>& list);
#endif
};
///////////////////////////////////////////////////////////////////////////////
-#if GR_DEBUG
+#ifdef SK_DEBUG
class GrAutoResourceCacheValidate {
public:
GrAutoResourceCacheValidate(GrResourceCache* cache) : fCache(cache) {
*/
int slowFindIndex(T* elem) const { return fSorted.find(elem); }
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const;
bool contains(T*) const;
#endif
Gr_bzero(fHash, sizeof(fHash));
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
template <typename T, typename Key, size_t kHashBits>
void GrTHashTable<T, Key, kHashBits>::validate() const {
int count = fSorted.count();
}
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
void GrFontCache::validate() const {
int count = fCache.count();
if (0 == count) {
///////////////////////////////////////////////////////////////////////////////
-#if GR_DEBUG
+#ifdef SK_DEBUG
static int gCounter;
#endif
fMaskFormat = format;
-#if GR_DEBUG
+#ifdef SK_DEBUG
// GrPrintf(" GrTextStrike %p %d\n", this, gCounter);
gCounter += 1;
#endif
fFontScalerKey->unref();
fCache.getArray().visitAll(free_glyph);
-#if GR_DEBUG
+#ifdef SK_DEBUG
gCounter -= 1;
// GrPrintf("~GrTextStrike %p %d\n", this, gCounter);
#endif
}
GrTextStrike* getHeadStrike() const { return fHead; }
-#if GR_DEBUG
+#ifdef SK_DEBUG
void validate() const;
#else
void validate() const {}
#include "GrTexture.h"
GrTextureAccess::GrTextureAccess() {
-#if GR_DEBUG
+#ifdef SK_DEBUG
memcpy(fSwizzle, "void", 5);
fSwizzleMask = 0xbeeffeed;
#endif
#define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)
-#if GR_DEBUG
+#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
///////////////////////////////////////////////////////////////////////////
// insert GS
-#if GR_DEBUG
+#ifdef SK_DEBUG
this->genGeometryShader(&builder);
#endif
class GrGpuGL;
-// optionally compile the experimental GS code. Set to GR_DEBUG so that debug build bots will
-// execute the code.
-#define GR_GL_EXPERIMENTAL_GS GR_DEBUG
+#ifdef SK_DEBUG
+    // Optionally compile the experimental GS code. Enabled when SK_DEBUG is defined so that
+    // debug build bots will execute the code.
+ #define GR_GL_EXPERIMENTAL_GS 1
+#else
+ #define GR_GL_EXPERIMENTAL_GS 0
+#endif
/** This class describes a program to generate. It also serves as a program cache key. Very little
return NULL;
}
-#if 0 && GR_DEBUG
+#if 0 && defined(SK_DEBUG)
static size_t as_size_t(int x) {
return x;
}
GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
GR_GL_DEPTH_ATTACHMENT,
GR_GL_RENDERBUFFER, 0));
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrGLenum status;
GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
if (fHWBoundRenderTarget != rt) {
GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
-#if GR_DEBUG
+#ifdef SK_DEBUG
GrGLenum status;
GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
memmove(fEntries + purgeIdx, fEntries + purgeIdx + 1, copySize);
fEntries[entryIdx - 1] = entry;
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
SkASSERT(NULL != fEntries[0]->fProgram.get());
for (int i = 0; i < fCount - 1; ++i) {
SkASSERT(NULL != fEntries[i + 1]->fProgram.get());
// If we aren't inheriting these as #defines from elsewhere,
// clang demands they be declared before we #include the template
// that relies on them.
-#if GR_DEBUG
+#ifdef SK_DEBUG
static bool LT(const int& elem, int value) {
return elem < value;
}
return entry.fKey == key.fKey;
}
-#if GR_DEBUG
+#ifdef SK_DEBUG
static uint32_t GetHash(const HashElement& entry) {
return entry.fKey;
}
ListElement elements[4]) {
REPORTER_ASSERT(reporter, empty == list.isEmpty());
-#if SK_DEBUG
+#ifdef SK_DEBUG
list.validate();
REPORTER_ASSERT(reporter, numElements == list.countEntries());
REPORTER_ASSERT(reporter, in0 == list.isInList(&elements[0]));