*/
GrTextureEntry* createAndLockTexture(GrTextureKey* key,
const GrSamplerState&,
- const GrGpu::TextureDesc&,
+ const GrTextureDesc&,
void* srcData, size_t rowBytes);
/**
- * When done with an entry, call unlockTexture(entry) on it, which returns
- * it to the cache, where it may be purged.
+ * Returns a texture matching the desc. Its contents are unknown. Subsequent
+ * requests with the same descriptor are not guaranteed to return the same
+ * texture. The same texture is guaranteed not to be returned again until it
+ * is unlocked.
*/
- void unlockTexture(GrTextureEntry* entry);
+ GrTextureEntry* lockKeylessTexture(const GrTextureDesc& desc,
+ const GrSamplerState& state);
/**
- * Removes an texture from the cache. This prevents the texture from
- * being found by a subsequent findAndLockTexture() until it is
- * reattached. The entry still counts against the cache's budget and should
- * be reattached when exclusive access is no longer needed.
- */
- void detachCachedTexture(GrTextureEntry*);
-
- /**
- * Reattaches a texture to the cache and unlocks it. Allows it to be found
- * by a subsequent findAndLock or be purged (provided its lock count is
- * now 0.)
+ * When done with an entry, call unlockTexture(entry) on it, which returns
+ * it to the cache, where it may be purged.
*/
- void reattachAndUnlockCachedTexture(GrTextureEntry*);
+ void unlockTexture(GrTextureEntry* entry);
/**
* Creates a texture that is outside the cache. Does not count against
* cache's budget.
*/
- GrTexture* createUncachedTexture(const GrGpu::TextureDesc&,
+ GrTexture* createUncachedTexture(const GrTextureDesc&,
void* srcData,
size_t rowBytes);
static void SetPaint(const GrPaint& paint, GrDrawTarget* target);
- bool finalizeTextureKey(GrTextureKey*, const GrSamplerState&) const;
+ bool finalizeTextureKey(GrTextureKey*,
+ const GrSamplerState&,
+ bool keyless) const;
GrDrawTarget* prepareToDraw(const GrPaint& paint, DrawCategory drawType);
static GrGpu* Create(Engine, Platform3DContext context3D);
/**
- * Used to control the level of antialiasing available for a rendertarget.
- * Anti-alias quality levels depend on the underlying API/GPU capabilities.
- */
- enum AALevels {
- kNone_AALevel, //<! No antialiasing available.
- kLow_AALevel, //<! Low quality antialiased rendering. Actual
- // interpretation is platform-dependent.
- kMed_AALevel, //<! Medium quality antialiased rendering. Actual
- // interpretation is platform-dependent.
- kHigh_AALevel, //<! High quality antialiased rendering. Actual
- // interpretation is platform-dependent.
- };
-
-
- /**
- * Optional bitfield flags that can be passed to createTexture.
- */
- enum TextureFlags {
- kRenderTarget_TextureFlag = 0x1, //<! Creates a texture that can be
- // rendered to by calling
- // GrGpu::setRenderTarget() with
- // GrTexture::asRenderTarget().
- kNoStencil_TextureFlag = 0x2, //<! If the texture is used as a
- // rendertarget but a stencil
- // buffer is not required. Stencil
- // may be required for clipping and
- // path rendering.
- kDynamicUpdate_TextureFlag = 0x4 //!< Hint that the CPU may modify
- // this texture after creation
- };
-
- enum {
- /**
- * For Index8 pixel config, the colortable must be 256 entries
- */
- kColorTableSize = 256 * sizeof(GrColor)
- };
- /**
- * Describes a texture to be created.
- */
- struct TextureDesc {
- uint32_t fFlags; //!< bitfield of TextureFlags
- GrGpu::AALevels fAALevel;//!< The level of antialiasing available
- // for a rendertarget texture. Only
- // flags contains
- // kRenderTarget_TextureFlag.
- uint32_t fWidth; //!< Width of the texture
- uint32_t fHeight; //!< Height of the texture
- GrPixelConfig fFormat; //!< Format of source data of the
- // texture. Not guaraunteed to be the
- // same as internal format used by
- // 3D API.
- };
-
- /**
* Gpu usage statistics.
*/
struct Stats {
*
* @return The texture object if successful, otherwise NULL.
*/
- GrTexture* createTexture(const TextureDesc& desc,
+ GrTexture* createTexture(const GrTextureDesc& desc,
const void* srcData, size_t rowBytes);
/**
* Wraps an externally-created rendertarget in a GrRenderTarget.
virtual void resetContext() = 0;
// overridden by API-specific derived class to create objects.
- virtual GrTexture* onCreateTexture(const TextureDesc& desc,
+ virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
const void* srcData,
size_t rowBytes) = 0;
virtual GrResource* onCreatePlatformSurface(const GrPlatformSurfaceDesc& desc) = 0;
uint16_t width() const { return fP2 & 0xffff; }
uint16_t height() const { return (fP2 >> 16); }
+ uint32_t getPrivateBits() const { return fPrivateBits; }
+
static uint32_t rol(uint32_t x) {
return (x >> 24) | (x << 8);
}
#include <memory.h>
#include <string.h>
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Defines overloaded bitwise operators to make it easier to use an enum as a
+ * bitfield.
+ */
+#define GR_MAKE_BITFIELD_OPS(X) \
+ static inline X operator | (X a, X b) { \
+ return (X) (+a | +b); \
+ } \
+ \
+ static inline X operator & (X a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ template <typename T> \
+ static inline X operator & (T a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ template <typename T> \
+ static inline X operator & (X a, T b) { \
+ return (X) (+a & +b); \
+ } \
+
+////////////////////////////////////////////////////////////////////////////////
+
+
/**
* Macro to round n up to the next multiple of 4, or return it unchanged if
* n is already a multiple of 4
}
/**
+ * Used to control the level of antialiasing available for a rendertarget.
+ * Anti-alias quality levels depend on the underlying API/GPU capabilities.
+ */
+enum GrAALevels {
+ kNone_GrAALevel, //<! No antialiasing available.
+ kLow_GrAALevel, //<! Low quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+ kMed_GrAALevel, //<! Medium quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+ kHigh_GrAALevel, //<! High quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+};
+
+/**
+ * Optional bitfield flags that can be passed to createTexture.
+ */
+enum GrTextureFlags {
+ kNone_GrTextureFlags = 0x0,
+ /**
+ * Creates a texture that can be rendered to as a GrRenderTarget. Use
+ * GrTexture::asRenderTarget() to access.
+ */
+ kRenderTarget_GrTextureFlagBit = 0x1,
+ /**
+ * By default all render targets have an associated stencil buffer that
+ * may be required for path filling. This flag overrides stencil buffer
+ * creation.
+ * MAKE THIS PRIVATE?
+ */
+ kNoStencil_GrTextureFlagBit = 0x2,
+ /**
+ * Hint that the CPU may modify this texture after creation.
+ */
+ kDynamicUpdate_GrTextureFlagBit = 0x4,
+};
+
+GR_MAKE_BITFIELD_OPS(GrTextureFlags)
+
+enum {
+ /**
+ * For Index8 pixel config, the colortable must be 256 entries
+ */
+ kGrColorTableSize = 256 * 4 //sizeof(GrColor)
+};
+
+/**
+ * Describes a texture to be created.
+ */
+struct GrTextureDesc {
+ GrTextureFlags fFlags; //!< bitfield of TextureFlags
+ /**
+     * The level of antialiasing available for a rendertarget texture. Only
+     * used if fFlags contains kRenderTarget_GrTextureFlagBit.
+ */
+ GrAALevels fAALevel;
+ uint32_t fWidth; //!< Width of the texture
+ uint32_t fHeight; //!< Height of the texture
+ /**
+     * Format of source data of the texture. Not guaranteed to be the same as
+ * internal format used by 3D API.
+ */
+ GrPixelConfig fFormat;
+};
+
+/**
* Set Operations used to construct clips.
*/
enum GrSetOp {
kGrCanResolve_GrPlatformRenderTargetFlagBit = 0x2,
};
-static inline GrPlatformRenderTargetFlags operator | (GrPlatformRenderTargetFlags a, GrPlatformRenderTargetFlags b) {
- return (GrPlatformRenderTargetFlags) (+a | +b);
-}
-
-static inline GrPlatformRenderTargetFlags operator & (GrPlatformRenderTargetFlags a, GrPlatformRenderTargetFlags b) {
- return (GrPlatformRenderTargetFlags) (+a & +b);
-}
+GR_MAKE_BITFIELD_OPS(GrPlatformRenderTargetFlags)
// opaque type for 3D API object handles
typedef intptr_t GrPlatform3DObject;
GrAssert(0 == kA8_GrMaskFormat);
GrAssert(1 == kA565_GrMaskFormat);
if (NULL == fTexture[format]) {
- GrGpu::TextureDesc desc = {
- GrGpu::kDynamicUpdate_TextureFlag,
- GrGpu::kNone_AALevel,
+ GrTextureDesc desc = {
+ kDynamicUpdate_GrTextureFlagBit,
+ kNone_GrAALevel,
GR_ATLAS_TEXTURE_WIDTH,
GR_ATLAS_TEXTURE_HEIGHT,
maskformat2pixelconfig(format)
fFontCache->freeAll();
}
+////////////////////////////////////////////////////////////////////////////////
+
+
+enum {
+ kNPOTBit = 0x1,
+ kFilterBit = 0x2,
+ kKeylessBit = 0x4,
+};
+
+bool GrContext::finalizeTextureKey(GrTextureKey* key,
+ const GrSamplerState& sampler,
+ bool keyless) const {
+ uint32_t bits = 0;
+ uint16_t width = key->width();
+ uint16_t height = key->height();
+
+ if (!fGpu->npotTextureTileSupport()) {
+ bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
+
+ bool tiled = (sampler.getWrapX() != GrSamplerState::kClamp_WrapMode) ||
+ (sampler.getWrapY() != GrSamplerState::kClamp_WrapMode);
+
+ if (tiled && !isPow2) {
+ bits |= kNPOTBit;
+ if (sampler.isFilter()) {
+ bits |= kFilterBit;
+ }
+ }
+ }
+
+ if (keyless) {
+ bits |= kKeylessBit;
+ }
+ key->finalize(bits);
+ return 0 != bits;
+}
+
GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
const GrSamplerState& sampler) {
- finalizeTextureKey(key, sampler);
+ finalizeTextureKey(key, sampler, false);
return fTextureCache->findAndLock(*key);
}
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
const GrSamplerState& sampler,
- const GrGpu::TextureDesc& desc,
+ const GrTextureDesc& desc,
void* srcData, size_t rowBytes) {
GrAssert(key->width() == desc.fWidth);
GrAssert(key->height() == desc.fHeight);
#endif
GrTextureEntry* entry = NULL;
- bool special = finalizeTextureKey(key, sampler);
+ bool special = finalizeTextureKey(key, sampler, false);
if (special) {
GrTextureEntry* clampEntry;
GrTextureKey clampKey(*key);
return NULL;
}
}
- GrGpu::TextureDesc rtDesc = desc;
- rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
- GrGpu::kNoStencil_TextureFlag;
+ GrTextureDesc rtDesc = desc;
+ rtDesc.fFlags = rtDesc.fFlags |
+ kRenderTarget_GrTextureFlagBit |
+ kNoStencil_GrTextureFlagBit;
rtDesc.fWidth = GrNextPow2(GrMax<int>(desc.fWidth,
fGpu->minRenderTargetWidth()));
rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
// not. Either implement filtered stretch blit on CPU or just create
// one when FBO case fails.
- rtDesc.fFlags = 0;
+ rtDesc.fFlags = kNone_GrTextureFlags;
// no longer need to clamp at min RT size.
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
return entry;
}
-void GrContext::unlockTexture(GrTextureEntry* entry) {
- fTextureCache->unlock(entry);
-}
-
-void GrContext::detachCachedTexture(GrTextureEntry* entry) {
- fTextureCache->detach(entry);
+GrTextureEntry* GrContext::lockKeylessTexture(const GrTextureDesc& desc,
+ const GrSamplerState& state) {
+ uint32_t p0 = desc.fFormat;
+ uint32_t p1 = (desc.fAALevel << 16) | desc.fFlags;
+ GrTextureKey key(p0, p1, desc.fWidth, desc.fHeight);
+ this->finalizeTextureKey(&key, state, true);
+ GrTextureEntry* entry = fTextureCache->findAndLock(key);
+ if (NULL == entry) {
+ GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
+ if (NULL != texture) {
+ entry = fTextureCache->createAndLock(key, texture);
+ }
+ }
+ // If the caller gives us the same desc/sampler twice we don't want
+ // to return the same texture the second time (unless it was previously
+ // released). So we detach the entry from the cache and reattach at release.
+ if (NULL != entry) {
+ fTextureCache->detach(entry);
+ }
+ return entry;
}
-void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
- fTextureCache->reattachAndUnlock(entry);
+void GrContext::unlockTexture(GrTextureEntry* entry) {
+ if (kKeylessBit & entry->key().getPrivateBits()) {
+ fTextureCache->reattachAndUnlock(entry);
+ } else {
+ fTextureCache->unlock(entry);
+ }
}
-GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
+GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc,
void* srcData,
size_t rowBytes) {
return fGpu->createTexture(desc, srcData, rowBytes);
// TODO: when underlying api has a direct way to do this we should use it
// (e.g. glDrawPixels on desktop GL).
- const GrGpu::TextureDesc desc = {
- 0, GrGpu::kNone_AALevel, width, height, config
+ const GrTextureDesc desc = {
+ kNone_GrTextureFlags, kNone_GrAALevel, width, height, config
};
GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
if (NULL == texture) {
#endif
}
-bool GrContext::finalizeTextureKey(GrTextureKey* key,
- const GrSamplerState& sampler) const {
- uint32_t bits = 0;
- uint16_t width = key->width();
- uint16_t height = key->height();
-
-
- if (!fGpu->npotTextureTileSupport()) {
- bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
-
- bool tiled = (sampler.getWrapX() != GrSamplerState::kClamp_WrapMode) ||
- (sampler.getWrapY() != GrSamplerState::kClamp_WrapMode);
-
- if (tiled && !isPow2) {
- bits |= 1;
- bits |= sampler.isFilter() ? 2 : 0;
- }
- }
- key->finalize(bits);
- return 0 != bits;
-}
-
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
GrDrawTarget* target;
#if DEFER_TEXT_RENDERING
////////////////////////////////////////////////////////////////////////////////
-GrTexture* GrGpu::createTexture(const TextureDesc& desc,
+GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
const void* srcData, size_t rowBytes) {
this->handleDirtyContext();
return this->onCreateTexture(desc, srcData, rowBytes);
GrPrintf("Palette8 support: %s\n", (f8bitPaletteSupport ? "YES" : "NO"));
}
- GR_STATIC_ASSERT(0 == kNone_AALevel);
- GR_STATIC_ASSERT(1 == kLow_AALevel);
- GR_STATIC_ASSERT(2 == kMed_AALevel);
- GR_STATIC_ASSERT(3 == kHigh_AALevel);
+ GR_STATIC_ASSERT(0 == kNone_GrAALevel);
+ GR_STATIC_ASSERT(1 == kLow_GrAALevel);
+ GR_STATIC_ASSERT(2 == kMed_GrAALevel);
+ GR_STATIC_ASSERT(3 == kHigh_GrAALevel);
memset(fAASamples, 0, sizeof(fAASamples));
fMSFBOType = kNone_MSFBO;
GrGLint maxSamples;
GR_GL_GetIntegerv(GR_GL_MAX_SAMPLES, &maxSamples);
if (maxSamples > 1 ) {
- fAASamples[kNone_AALevel] = 0;
- fAASamples[kLow_AALevel] = GrMax(2,
- GrFixedFloorToInt((GR_FixedHalf) *
- maxSamples));
- fAASamples[kMed_AALevel] = GrMax(2,
- GrFixedFloorToInt(((GR_Fixed1*3)/4) *
- maxSamples));
- fAASamples[kHigh_AALevel] = maxSamples;
+ fAASamples[kNone_GrAALevel] = 0;
+ fAASamples[kLow_GrAALevel] = GrMax(2,
+ GrFixedFloorToInt((GR_FixedHalf) *
+ maxSamples));
+ fAASamples[kMed_GrAALevel] = GrMax(2,
+ GrFixedFloorToInt(((GR_Fixed1*3)/4) *
+ maxSamples));
+ fAASamples[kHigh_GrAALevel] = maxSamples;
}
if (gPrintStartupSpew) {
GrPrintf("\tMax Samples: %d\n", maxSamples);
fHWGeometryState.fIndexBuffer = NULL;
fHWGeometryState.fVertexBuffer = NULL;
+
GR_GL(BindBuffer(GR_GL_ARRAY_BUFFER, 0));
GR_GL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
+
fHWGeometryState.fArrayPtrsDirty = true;
GR_GL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
}
#endif
-GrTexture* GrGpuGL::onCreateTexture(const TextureDesc& desc,
+GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
const void* srcData,
size_t rowBytes) {
glDesc.fFormat = desc.fFormat;
glDesc.fOwnsID = true;
- bool renderTarget = 0 != (desc.fFlags & kRenderTarget_TextureFlag);
+ bool renderTarget = 0 != (desc.fFlags & kRenderTarget_GrTextureFlagBit);
if (!canBeTexture(desc.fFormat,
&internalFormat,
&glDesc.fUploadFormat,
GrAssert(as_size_t(desc.fAALevel) < GR_ARRAY_COUNT(fAASamples));
GrGLint samples = fAASamples[desc.fAALevel];
- if (kNone_MSFBO == fMSFBOType && desc.fAALevel != kNone_AALevel) {
+ if (kNone_MSFBO == fMSFBOType && desc.fAALevel != kNone_GrAALevel) {
GrPrintf("AA RT requested but not supported on this platform.");
}
GrAssert(desc.fWidth == glDesc.fAllocWidth);
GrAssert(desc.fHeight == glDesc.fAllocHeight);
GrGLsizei imageSize = glDesc.fAllocWidth * glDesc.fAllocHeight +
- kColorTableSize;
+ kGrColorTableSize;
GR_GL(CompressedTexImage2D(GR_GL_TEXTURE_2D, 0, glDesc.fUploadFormat,
glDesc.fAllocWidth, glDesc.fAllocHeight,
0, imageSize, srcData));
} else {
rtIDs.fRTFBOID = rtIDs.fTexFBOID;
}
- if (!(kNoStencil_TextureFlag & desc.fFlags)) {
+ if (!(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
GR_GL(GenRenderbuffers(1, &rtIDs.fStencilRenderbufferID));
GrAssert(0 != rtIDs.fStencilRenderbufferID);
}
fHWDrawState.fRenderTarget = NULL;
// clear the new stencil buffer if we have one
- if (!(desc.fFlags & kNoStencil_TextureFlag)) {
+ if (!(desc.fFlags & kNoStencil_GrTextureFlagBit)) {
GrRenderTarget* rtSave = fCurrDrawState.fRenderTarget;
fCurrDrawState.fRenderTarget = rt;
this->clearStencil(0, ~0);
// overrides from GrGpu
virtual void resetContext();
- virtual GrTexture* onCreateTexture(const TextureDesc& desc,
+ virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
const void* srcData,
size_t rowBytes);
virtual GrVertexBuffer* onCreateVertexBuffer(uint32_t size,
if (fCache) {
GrAssert(NULL != fTexture);
GrAssert(fRenderTarget == fTexture->asRenderTarget());
- // IMPORTANT: reattach the rendertarget/tex back to the cache.
- fContext->reattachAndUnlockCachedTexture((GrTextureEntry*)fCache);
+ fContext->unlockTexture((GrTextureEntry*)fCache);
} else if (NULL != fTexture) {
GrAssert(!CACHE_LAYER_TEXTURES);
GrAssert(fRenderTarget == fTexture->asRenderTarget());
void SkGpuDevice::drawRect(const SkDraw& draw, const SkRect& rect,
const SkPaint& paint) {
- CHECK_SHOULD_DRAW(draw);\r
-\r
- bool doStroke = paint.getStyle() == SkPaint::kStroke_Style;\r
- SkScalar width = paint.getStrokeWidth();\r
-\r
- /*\r
- We have special code for hairline strokes, miter-strokes, and fills.\r
- Anything else we just call our path code.\r
- */\r
- bool usePath = doStroke && width > 0 &&\r
- paint.getStrokeJoin() != SkPaint::kMiter_Join;\r
- // another reason we might need to call drawPath...\r
- if (paint.getMaskFilter()) {\r
- usePath = true;\r
- }\r
-\r
- if (usePath) {\r
- SkPath path;\r
- path.addRect(rect);\r
- this->drawPath(draw, path, paint, NULL, true);\r
- return;\r
- }\r
-\r
- GrPaint grPaint;\r
- SkAutoCachedTexture act;\r
- if (!this->skPaint2GrPaintShader(paint, &act, *draw.fMatrix, &grPaint)) {\r
- return;\r
- }\r
+ CHECK_SHOULD_DRAW(draw);
+
+ bool doStroke = paint.getStyle() == SkPaint::kStroke_Style;
+ SkScalar width = paint.getStrokeWidth();
+
+ /*
+ We have special code for hairline strokes, miter-strokes, and fills.
+ Anything else we just call our path code.
+ */
+ bool usePath = doStroke && width > 0 &&
+ paint.getStrokeJoin() != SkPaint::kMiter_Join;
+ // another reason we might need to call drawPath...
+ if (paint.getMaskFilter()) {
+ usePath = true;
+ }
+
+ if (usePath) {
+ SkPath path;
+ path.addRect(rect);
+ this->drawPath(draw, path, paint, NULL, true);
+ return;
+ }
+
+ GrPaint grPaint;
+ SkAutoCachedTexture act;
+ if (!this->skPaint2GrPaintShader(paint, &act, *draw.fMatrix, &grPaint)) {
+ return;
+ }
fContext->drawRect(grPaint, Sk2Gr(rect), doStroke ? width : -1);
}
GrAutoMatrix avm(context, GrMatrix::I());
- const GrGpu::TextureDesc desc = {
- 0,
- GrGpu::kNone_AALevel,
+ const GrTextureDesc desc = {
+ kNone_GrTextureFlags,
+ kNone_GrAALevel,
dstM.fBounds.width(),
dstM.fBounds.height(),
kAlpha_8_GrPixelConfig
///////////////////////////////////////////////////////////////////////////////
SkGpuDevice::TexCache* SkGpuDevice::lockCachedTexture(const SkBitmap& bitmap,
- const GrSamplerState& sampler,
- GrTexture** texture,
- bool forDeviceRenderTarget) {
+ const GrSamplerState& sampler,
+ GrTexture** texture,
+ bool forDeviceRenderTarget) {
+ GrTexture* newTexture = NULL;
+ GrTextureEntry* entry = NULL;
GrContext* ctx = this->context();
- uint32_t p0, p1;
+
if (forDeviceRenderTarget) {
- p0 = p1 = -1;
+ const GrTextureDesc desc = {
+ kRenderTarget_GrTextureFlagBit,
+ kNone_GrAALevel,
+ bitmap.width(),
+ bitmap.height(),
+ SkGr::Bitmap2PixelConfig(bitmap)
+ };
+ entry = ctx->lockKeylessTexture(desc, sampler);
} else {
+ uint32_t p0, p1;
p0 = bitmap.getGenerationID();
p1 = bitmap.pixelRefOffset();
- }
-
- GrTexture* newTexture = NULL;
- GrTextureKey key(p0, p1, bitmap.width(), bitmap.height());
- GrTextureEntry* entry = ctx->findAndLockTexture(&key, sampler);
- if (NULL == entry) {
+ GrTextureKey key(p0, p1, bitmap.width(), bitmap.height());
+ entry = ctx->findAndLockTexture(&key, sampler);
- if (forDeviceRenderTarget) {
- const GrGpu::TextureDesc desc = {
- GrGpu::kRenderTarget_TextureFlag,
- GrGpu::kNone_AALevel,
- bitmap.width(),
- bitmap.height(),
- SkGr::Bitmap2PixelConfig(bitmap)
- };
- entry = ctx->createAndLockTexture(&key, sampler, desc, NULL, 0);
-
- } else {
- entry = sk_gr_create_bitmap_texture(ctx, &key, sampler, bitmap);
- }
if (NULL == entry) {
- GrPrintf("---- failed to create texture for cache [%d %d]\n",
- bitmap.width(), bitmap.height());
+ entry = sk_gr_create_bitmap_texture(ctx, &key, sampler, bitmap);
+ if (NULL == entry) {
+ GrPrintf("---- failed to create texture for cache [%d %d]\n",
+ bitmap.width(), bitmap.height());
+ }
}
}
if (texture) {
*texture = newTexture;
}
- // IMPORTANT: We can't allow another SkGpuDevice to get this
- // cache entry until this one is destroyed!
- if (forDeviceRenderTarget) {
- ctx->detachCachedTexture(entry);
- }
}
return (TexCache*)entry;
}
ctable->unlockColors(false);
// always skip a full 256 number of entries, even if we memcpy'd fewer
- dst += GrGpu::kColorTableSize;
+ dst += kGrColorTableSize;
if (bitmap.width() == bitmap.rowBytes()) {
memcpy(dst, bitmap.getPixels(), bitmap.getSize());
const SkBitmap* bitmap = &origBitmap;
- GrGpu::TextureDesc desc = {
- 0,
- GrGpu::kNone_AALevel,
+ GrTextureDesc desc = {
+ kNone_GrTextureFlags,
+ kNone_GrAALevel,
bitmap->width(),
bitmap->height(),
SkGr::Bitmap2PixelConfig(*bitmap)
if (ctx->supportsIndex8PixelConfig(sampler,
bitmap->width(), bitmap->height())) {
size_t imagesize = bitmap->width() * bitmap->height() +
- GrGpu::kColorTableSize;
+ kGrColorTableSize;
SkAutoMalloc storage(imagesize);
build_compressed_data(storage.get(), origBitmap);