'gr_sources': [
'<(skia_include_path)/gpu/GrAARectRenderer.h',
'<(skia_include_path)/gpu/GrBackendEffectFactory.h',
- '<(skia_include_path)/gpu/GrCacheID.h',
'<(skia_include_path)/gpu/GrClipData.h',
'<(skia_include_path)/gpu/GrColor.h',
'<(skia_include_path)/gpu/GrConfig.h',
+++ /dev/null
-/*
- * Copyright 2012 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrCacheID_DEFINED
-#define GrCacheID_DEFINED
-
-#include "GrTypes.h"
-
-///////////////////////////////////////////////////////////////////////////////
-#define GR_DECLARE_RESOURCE_CACHE_TYPE() \
- static int8_t GetResourceType();
-
-#define GR_DEFINE_RESOURCE_CACHE_TYPE(ClassName) \
- int8_t ClassName::GetResourceType() { \
- static int8_t kResourceTypeID = 0; \
- if (0 == kResourceTypeID) { \
- kResourceTypeID = GrCacheID::GetNextResourceType(); \
- } \
- return kResourceTypeID; \
- }
-
-
-///////////////////////////////////////////////////////////////////////////////
-#define GR_DECLARE_RESOURCE_CACHE_DOMAIN(AccessorName) \
- static int8_t AccessorName();
-
-#define GR_DEFINE_RESOURCE_CACHE_DOMAIN(ClassName, AccessorName) \
- int8_t ClassName::AccessorName() { \
- static int8_t kDomainID = 0; \
- if (0 == kDomainID) { \
- kDomainID = GrCacheID::GetNextDomain(); \
- } \
- return kDomainID; \
- }
-
-/**
- * The cache ID adds structure to the IDs used for caching GPU resources. It
- * is broken into three portions:
- * the public portion - which is filled in by Skia clients
- * the private portion - which is used by the cache (domain & type)
- * the resource-specific portion - which is filled in by each GrResource-
- * derived class.
- *
- * For the public portion each client of the cache makes up its own
- * unique-per-resource identifier (e.g., bitmap genID). A public ID of
- * 'kScratch_CacheID' indicates that the resource is a "scratch" resource.
- * When used to acquire a resource it indicates the cache user is
- * looking for a resource that matches a resource-subclass-specific set of
- * \93dimensions\94 such as width, height, buffer size, or pixel config, but not
- * for particular resource contents (e.g., texel or vertex values). The public
- * IDs are unique within a private ID value but not necessarily across
- * private IDs.
- *
- * The domain portion identifies the cache client while the type field
- * indicates the resource type. When the public portion indicates that the
- * resource is a scratch resource, the domain field should be kUnrestricted
- * so that scratch resources can be recycled across domains.
- */
-class GrCacheID {
-public:
- uint64_t fPublicID;
-
- uint32_t fResourceSpecific32;
-
- uint8_t fDomain;
-private:
- uint8_t fResourceType;
-
-public:
- uint16_t fResourceSpecific16;
-
- GrCacheID(uint8_t resourceType)
- : fPublicID(kDefaultPublicCacheID)
- , fDomain(GrCacheData::kScratch_ResourceDomain)
- , fResourceType(resourceType) {
- }
-
- void toRaw(uint32_t v[4]);
-
- uint8_t getResourceType() const { return fResourceType; }
-
- /*
- * Default value for public portion of GrCacheID
- */
- static const uint64_t kDefaultPublicCacheID = 0;
-
- static const uint8_t kInvalid_ResourceType = 0;
-
- static uint8_t GetNextDomain();
- static uint8_t GetNextResourceType();
-
-
-};
-
-#endif // GrCacheID_DEFINED
// Textures
/**
- * Create a new entry, based on the specified key and texture, and return
- * a "locked" texture. Must call be balanced with an unlockTexture() call.
+ * Create a new entry, based on the specified key and texture and return it.
*
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
* @param desc Description of the texture properties.
- * @param cacheData Cache-specific properties (e.g., texture gen ID)
+ * @param cacheID Cache-specific properties (e.g., texture gen ID)
* @param srcData Pointer to the pixel values.
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows.
*/
GrTexture* createTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
void* srcData, size_t rowBytes);
/**
* return it. The return value will be NULL if not found.
*
* @param desc Description of the texture properties.
- * @param cacheData Cache-specific properties (e.g., texture gen ID)
+ * @param cacheID Cache-specific properties (e.g., texture gen ID)
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
*/
GrTexture* findTexture(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
const GrTextureParams* params);
/**
* Determines whether a texture is in the cache. If the texture is found it
* the texture for deletion.
*/
bool isTextureInCache(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
const GrTextureParams* params) const;
/**
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
*/
- GrTexture* lockScratchTexture(const GrTextureDesc& desc,
- ScratchTexMatch match);
+ GrTexture* lockScratchTexture(const GrTextureDesc&, ScratchTexMatch match);
/**
- * When done with an entry, call unlockTexture(entry) on it, which returns
+ * When done with an entry, call unlockScratchTexture(entry) on it, which returns
* it to the cache, where it may be purged.
*/
void unlockScratchTexture(GrTexture* texture);
void internalDrawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke);
GrTexture* createResizedTexture(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
void* srcData,
size_t rowBytes,
bool needsFiltering);
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
- GrContext::ScratchTexMatch match =
- GrContext::kApprox_ScratchTexMatch)
+ GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch)
: fContext(NULL)
, fTexture(NULL) {
this->set(context, desc, match);
GrTexture* set(GrContext* context,
const GrTextureDesc& desc,
- GrContext::ScratchTexMatch match =
- GrContext::kApprox_ScratchTexMatch) {
+ GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch) {
this->reset();
fContext = context;
#define GrTexture_DEFINED
#include "GrSurface.h"
-#include "GrCacheID.h"
class GrRenderTarget;
class GrResourceKey;
public:
SK_DECLARE_INST_COUNT(GrTexture)
- GR_DECLARE_RESOURCE_CACHE_TYPE()
-
// from GrResource
/**
* Informational texture flags
#else
void validate() const {}
#endif
-
static GrResourceKey ComputeKey(const GrGpu* gpu,
- const GrTextureParams* sampler,
+ const GrTextureParams* params,
const GrTextureDesc& desc,
- const GrCacheData& cacheData,
- bool scratch);
-
+ const GrCacheID& cacheID);
+ static GrResourceKey ComputeScratchKey(const GrTextureDesc& desc);
static bool NeedsResizing(const GrResourceKey& key);
- static bool IsScratchTexture(const GrResourceKey& key);
static bool NeedsFiltering(const GrResourceKey& key);
protected:
};
/**
- * GrCacheData holds user-provided cache-specific data. It is used in
- * combination with the GrTextureDesc to construct a cache key for texture
- * resources.
+ * GrCacheID is used to create and find cached GrResources (e.g. GrTextures). The ID has two parts:
+ * the domain and the key. Domains simply allow multiple clients to use 0-based indices as their
+ * cache key without colliding. The key uniquely identifies a GrResource within the domain.
+ * Users of the cache must obtain a domain via GenerateDomain().
*/
-struct GrCacheData {
- /*
- * Scratch textures should all have this value as their fClientCacheID
- */
- static const uint64_t kScratch_CacheID = 0xBBBBBBBB;
-
- /**
- * Resources in the "scratch" domain can be used by any domain. All
- * scratch textures will have this as their domain.
- */
- static const uint8_t kScratch_ResourceDomain = 0;
-
-
- // No default constructor is provided since, if you are creating one
- // of these, you should definitely have a key (or be using the scratch
- // key).
- GrCacheData(uint64_t key)
- : fClientCacheID(key)
- , fResourceDomain(kScratch_ResourceDomain) {
- }
+struct GrCacheID {
+public:
+ typedef uint8_t Domain;
+
+ struct Key {
+ union {
+ uint8_t fData8[16];
+ uint32_t fData32[4];
+ uint64_t fData64[2];
+ };
+ };
/**
- * A user-provided texture ID. It should be unique to the texture data and
- * does not need to take into account the width or height. Two textures
- * with the same ID but different dimensions will not collide. This field
- * is only relevant for textures that will be cached.
+ * A default cache ID is invalid; a set method must be called before the object is used.
*/
- uint64_t fClientCacheID;
+ GrCacheID() { fDomain = kInvalid_Domain; }
/**
- * Allows cache clients to cluster their textures inside domains (e.g.,
- * alpha clip masks). Only relevant for cached textures.
+ * Initialize the cache ID to a domain and key.
*/
- uint8_t fResourceDomain;
+ GrCacheID(Domain domain, const Key& key) {
+ GrAssert(kInvalid_Domain != domain);
+ this->reset(domain, key);
+ }
+
+ void reset(Domain domain, const Key& key) {
+ fDomain = domain;
+ memcpy(&fKey, &key, sizeof(Key));
+ }
+
+ /** Has this been initialized to a valid domain */
+ bool isValid() const { return kInvalid_Domain != fDomain; }
+
+ const Key& getKey() const { GrAssert(this->isValid()); return fKey; }
+ Domain getDomain() const { GrAssert(this->isValid()); return fDomain; }
+
+ /** Creates a new unique ID domain. */
+ static Domain GenerateDomain();
+
+private:
+ Key fKey;
+ Domain fDomain;
+
+ static const Domain kInvalid_Domain = 0;
};
/**
class SkAutoCachedTexture; // used internally
protected:
- bool isBitmapInTextureCache(const SkBitmap& bitmap,
- const GrTextureParams& params) const;
-
// overrides from SkDevice
virtual bool onReadPixels(const SkBitmap& bitmap,
int x, int y,
////////////////////////////////////////////////////////////////////////////////
-GrTexture* GrLockCachedBitmapTexture(GrContext*,
- const SkBitmap&,
- const GrTextureParams*);
+bool GrIsBitmapInCache(const GrContext*, const SkBitmap&, const GrTextureParams*);
+
+GrTexture* GrLockCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
void GrUnlockCachedBitmapTexture(GrTexture*);
* Hash function class that can take a data chunk of any predetermined length. The hash function
* used is the One-at-a-Time Hash (http://burtleburtle.net/bob/hash/doobs.html).
*
- * Keys are computed from Entry objects. Entry must be fully ordered by a member:
- * int compare(const GrTBinHashKey<Entry, ..>& k);
- * which returns negative if the Entry < k, 0 if it equals k, and positive if k < the Entry.
- * Additionally, Entry must be flattenable into the key using setKeyData.
+ * Keys are computed from ENTRY objects. ENTRY must be fully ordered by a member:
+ * int compare(const GrTBinHashKey<ENTRY, ..>& k);
+ * which returns negative if the ENTRY < k, 0 if it equals k, and positive if k < the ENTRY.
+ * Additionally, ENTRY must be flattenable into the key using setKeyData.
*
* This class satisfies the requirements to be a key for a GrTHashTable.
*/
-template<typename Entry, size_t KeySize>
+template<typename ENTRY, size_t KEY_SIZE>
class GrTBinHashKey {
public:
+ enum { kKeySize = KEY_SIZE };
+
GrTBinHashKey() {
this->reset();
}
- GrTBinHashKey(const GrTBinHashKey<Entry, KeySize>& other) {
+ GrTBinHashKey(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
*this = other;
}
- GrTBinHashKey<Entry, KeySize>& operator=(const GrTBinHashKey<Entry, KeySize>& other) {
+ GrTBinHashKey<ENTRY, KEY_SIZE>& operator=(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
memcpy(this, &other, sizeof(*this));
return *this;
}
}
void setKeyData(const uint32_t* SK_RESTRICT data) {
- GrAssert(GrIsALIGN4(KeySize));
- memcpy(&fData, data, KeySize);
+ GrAssert(GrIsALIGN4(KEY_SIZE));
+ memcpy(&fData, data, KEY_SIZE);
uint32_t hash = 0;
- size_t len = KeySize;
+ size_t len = KEY_SIZE;
while (len >= 4) {
hash += *data++;
hash += (fHash << 10);
fHash = hash;
}
- int compare(const GrTBinHashKey<Entry, KeySize>& key) const {
+ int compare(const GrTBinHashKey<ENTRY, KEY_SIZE>& key) const {
GrAssert(fIsValid && key.fIsValid);
- return memcmp(fData, key.fData, KeySize);
+ return memcmp(fData, key.fData, KEY_SIZE);
}
- static bool EQ(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
+ static bool EQ(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
GrAssert(key.fIsValid);
return 0 == entry.compare(key);
}
- static bool LT(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
+ static bool LT(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
GrAssert(key.fIsValid);
return entry.compare(key) < 0;
}
return fHash;
}
+ const uint8_t* getData() const {
+ GrAssert(fIsValid);
+ return fData;
+ }
+
private:
uint32_t fHash;
- uint8_t fData[KeySize]; // Buffer for key storage
+ uint8_t fData[KEY_SIZE]; // Buffer for key storage
#if GR_DEBUG
public:
* found in the LICENSE file.
*/
-#include "GrCacheID.h"
+#include "GrTypes.h"
#include "SkThread.h" // for sk_atomic_inc
-uint8_t GrCacheID::GetNextDomain() {
- // 0 reserved for kUnrestricted_ResourceDomain
- static int32_t gNextDomain = 1;
+static const GrCacheID::Key kAssertKey;
+GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData32));
+GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData64));
+GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey));
+
+GrCacheID::Domain GrCacheID::GenerateDomain() {
+ static int32_t gNextDomain = kInvalid_Domain + 1;
int32_t domain = sk_atomic_inc(&gNextDomain);
- if (domain >= 256) {
+ if (domain >= 1 << (8 * sizeof(Domain))) {
GrCrash("Too many Cache Domains");
}
- return (uint8_t) domain;
-}
-
-uint8_t GrCacheID::GetNextResourceType() {
- // 0 reserved for kInvalid_ResourceType
- static int32_t gNextResourceType = 1;
-
- int32_t type = sk_atomic_inc(&gNextResourceType);
- if (type >= 256) {
- GrCrash("Too many Cache Resource Types");
- }
-
- return (uint8_t) type;
-}
-
-void GrCacheID::toRaw(uint32_t v[4]) {
- GrAssert(4*sizeof(uint32_t) == sizeof(GrCacheID));
-
- v[0] = (uint32_t) (fPublicID & 0xffffffffUL);
- v[1] = (uint32_t) ((fPublicID >> 32) & 0xffffffffUL);
- v[2] = fResourceSpecific32;
- v[3] = fDomain << 24 |
- fResourceType << 16 |
- fResourceSpecific16;
+ return static_cast<Domain>(domain);
}
#include "GrAAConvexPathRenderer.h"
#include "GrAAHairLinePathRenderer.h"
#include "GrSWMaskHelper.h"
-#include "GrCacheID.h"
#include "SkTLazy.h"
-GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrClipMaskManager, GetAlphaMaskDomain)
-
#define GR_AA_CLIP 1
typedef SkClipStack::Element Element;
*/
class GrClipMaskManager : public GrNoncopyable {
public:
- GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetAlphaMaskDomain)
-
GrClipMaskManager()
: fGpu(NULL)
, fCurrClipMaskType(kNone_ClipMaskType) {
}
+////////////////////////////////////////////////////////////////////////////////
+
GrTexture* GrContext::findTexture(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
const GrTextureParams* params) {
- GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
+ GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
GrResource* resource = fTextureCache->find(resourceKey);
return static_cast<GrTexture*>(resource);
}
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
const GrTextureParams* params) const {
- GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
+ GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
return fTextureCache->hasKey(resourceKey);
}
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
void* srcData,
size_t rowBytes,
bool needsFiltering) {
- GrTexture* clampedTexture = this->findTexture(desc, cacheData, NULL);
+ GrTexture* clampedTexture = this->findTexture(desc, cacheID, NULL);
if (NULL == clampedTexture) {
- clampedTexture = this->createTexture(NULL, desc, cacheData, srcData, rowBytes);
+ clampedTexture = this->createTexture(NULL, desc, cacheID, srcData, rowBytes);
if (NULL == clampedTexture) {
return NULL;
GrTexture* GrContext::createTexture(
const GrTextureParams* params,
const GrTextureDesc& desc,
- const GrCacheData& cacheData,
+ const GrCacheID& cacheID,
void* srcData,
size_t rowBytes) {
- SK_TRACE_EVENT0("GrContext::createAndLockTexture");
+ SK_TRACE_EVENT0("GrContext::createTexture");
#if GR_DUMP_TEXTURE_UPLOAD
- GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
+ GrPrintf("GrContext::createTexture[%d %d]\n", desc.fWidth, desc.fHeight);
#endif
- GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
+ GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
SkAutoTUnref<GrTexture> texture;
if (GrTexture::NeedsResizing(resourceKey)) {
- texture.reset(this->createResizedTexture(desc, cacheData,
- srcData, rowBytes,
- GrTexture::NeedsFiltering(resourceKey)));
+ texture.reset(this->createResizedTexture(desc, cacheID,
+ srcData, rowBytes,
+ GrTexture::NeedsFiltering(resourceKey)));
} else {
texture.reset(fGpu->createTexture(desc, srcData, rowBytes));
}
return texture;
}
-GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
- ScratchTexMatch match) {
+GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
GrTextureDesc desc = inDesc;
- GrCacheData cacheData(GrCacheData::kScratch_CacheID);
GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
!(desc.fFlags & kNoStencil_GrTextureFlagBit));
- if (kExact_ScratchTexMatch != match) {
+ if (kApprox_ScratchTexMatch == match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 256;
desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
bool doubledH = false;
do {
- GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
+ GrResourceKey key = GrTexture::ComputeScratchKey(desc);
// Ensure we have exclusive access to the texture so future 'find' calls don't return it
resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
// if we miss, relax the fit of the flags...
desc.fHeight = origHeight;
SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
if (NULL != texture) {
- GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
- texture->desc(),
- cacheData,
- true);
+ GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
// Make the resource exclusive so future 'find' calls don't return it
fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
resource = texture;
// If this is a scratch texture we detached it from the cache
// while it was locked (to avoid two callers simultaneously getting
// the same texture).
- if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
+ if (texture->getCacheEntry()->key().isScratch()) {
fTextureCache->makeNonExclusive(texture->getCacheEntry());
}
#include "GrResourceCache.h"
#include "GrResource.h"
+
+// Hands out a process-unique 8-bit type ID per GrResource subclass so keys
+// for different resource types can never collide.
+GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() {
+    // Monotonic counter shared across threads via sk_atomic_inc. Unlike
+    // GrCacheID domains, no ResourceType value is reserved as invalid, so the
+    // counter starts at 0.
+    static int32_t gNextType = 0;
+
+    int32_t type = sk_atomic_inc(&gNextType);
+    // ResourceType is only 8 bits wide; crash loudly instead of silently
+    // wrapping and aliasing an existing type. (1 << 8*sizeof == 256)
+    if (type >= (1 << 8 * sizeof(ResourceType))) {
+        GrCrash("Too many Resource Types");
+    }
+
+    return static_cast<ResourceType>(type);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
: fKey(key), fResource(resource) {
// we assume ownership of the resource, and will unref it when we die
///////////////////////////////////////////////////////////////////////////////
-class GrResourceCache::Key {
- typedef GrResourceEntry T;
-
- const GrResourceKey& fKey;
-public:
- Key(const GrResourceKey& key) : fKey(key) {}
-
- uint32_t getHash() const { return fKey.hashIndex(); }
-
- static bool LT(const T& entry, const Key& key) {
- return entry.key() < key.fKey;
- }
- static bool EQ(const T& entry, const Key& key) {
- return entry.key() == key.fKey;
- }
-#if GR_DEBUG
- static uint32_t GetHash(const T& entry) {
- return entry.key().hashIndex();
- }
- static bool LT(const T& a, const T& b) {
- return a.key() < b.key();
- }
- static bool EQ(const T& a, const T& b) {
- return a.key() == b.key();
- }
-#endif
-};
-
-///////////////////////////////////////////////////////////////////////////////
-
GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
fMaxCount(maxCount),
fMaxBytes(maxBytes) {
#include "GrConfig.h"
#include "GrTypes.h"
#include "GrTHashCache.h"
+#include "GrBinHashKey.h"
#include "SkTInternalLList.h"
class GrResource;
+class GrResourceEntry;
-// return true if a<b, or false if b<a
-//
-#define RET_IF_LT_OR_GT(a, b) \
- do { \
- if ((a) < (b)) { \
- return true; \
- } \
- if ((b) < (a)) { \
- return false; \
- } \
- } while (0)
-
-/**
- * Helper class for GrResourceCache, the Key is used to identify src data for
- * a resource. It is identified by 2 32bit data fields which can hold any
- * data (uninterpreted by the cache) and a width/height.
- */
class GrResourceKey {
public:
enum {
kHashMask = kHashCount - 1
};
- GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) {
- fP[0] = p0;
- fP[1] = p1;
- fP[2] = p2;
- fP[3] = p3;
- this->computeHashIndex();
+ static GrCacheID::Domain ScratchDomain() {
+ static const GrCacheID::Domain gDomain = GrCacheID::GenerateDomain();
+ return gDomain;
}
- GrResourceKey(uint32_t v[4]) {
- memcpy(fP, v, 4 * sizeof(uint32_t));
- this->computeHashIndex();
- }
+ /** Uniquely identifies the GrResource subclass in the key to avoid collisions
+ across resource types. */
+ typedef uint8_t ResourceType;
+
+ /** Flags set by the GrResource subclass. */
+ typedef uint8_t ResourceFlags;
+
+ /** Generate a unique ResourceType */
+ static ResourceType GenerateResourceType();
+
+ /** Creates a key for resource */
+ GrResourceKey(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
+ this->init(id.getDomain(), id.getKey(), type, flags);
+ };
GrResourceKey(const GrResourceKey& src) {
- memcpy(fP, src.fP, 4 * sizeof(uint32_t));
-#if GR_DEBUG
- this->computeHashIndex();
- GrAssert(fHashIndex == src.fHashIndex);
-#endif
- fHashIndex = src.fHashIndex;
+ fKey = src.fKey;
}
- //!< returns hash value [0..kHashMask] for the key
- int hashIndex() const { return fHashIndex; }
+ GrResourceKey() {
+ fKey.fHashedKey.reset();
+ }
- friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) {
- GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
- return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t));
+ void reset(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
+ this->init(id.getDomain(), id.getKey(), type, flags);
}
- friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) {
- GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
- return !(a == b);
+ //!< returns hash value [0..kHashMask] for the key
+ int getHash() const {
+ return fKey.fHashedKey.getHash() & kHashMask;
}
- friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) {
- RET_IF_LT_OR_GT(a.fP[0], b.fP[0]);
- RET_IF_LT_OR_GT(a.fP[1], b.fP[1]);
- RET_IF_LT_OR_GT(a.fP[2], b.fP[2]);
- return a.fP[3] < b.fP[3];
+ bool isScratch() const {
+ return ScratchDomain() ==
+ *reinterpret_cast<const GrCacheID::Domain*>(fKey.fHashedKey.getData() +
+ kCacheIDDomainOffset);
}
- uint32_t getValue32(int i) const {
- GrAssert(i >=0 && i < 4);
- return fP[i];
+ ResourceType getResourceType() const {
+ return *reinterpret_cast<const ResourceType*>(fKey.fHashedKey.getData() +
+ kResourceTypeOffset);
+ }
+
+ ResourceFlags getResourceFlags() const {
+ return *reinterpret_cast<const ResourceFlags*>(fKey.fHashedKey.getData() +
+ kResourceFlagsOffset);
}
-private:
- static uint32_t rol(uint32_t x) {
- return (x >> 24) | (x << 8);
+ int compare(const GrResourceKey& other) const {
+ return fKey.fHashedKey.compare(other.fKey.fHashedKey);
}
- static uint32_t ror(uint32_t x) {
- return (x >> 8) | (x << 24);
+
+ static bool LT(const GrResourceKey& a, const GrResourceKey& b) {
+ return a.compare(b) < 0;
}
- static uint32_t rohalf(uint32_t x) {
- return (x >> 16) | (x << 16);
+
+ static bool EQ(const GrResourceKey& a, const GrResourceKey& b) {
+ return 0 == a.compare(b);
}
- void computeHashIndex() {
- uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]);
- // this way to mix and reduce hash to its index may have to change
- // depending on how many bits we allocate to the index
- hash ^= hash >> 16;
- hash ^= hash >> 8;
- fHashIndex = hash & kHashMask;
+ inline static bool LT(const GrResourceEntry& entry, const GrResourceKey& key);
+ inline static bool EQ(const GrResourceEntry& entry, const GrResourceKey& key);
+ inline static bool LT(const GrResourceEntry& a, const GrResourceEntry& b);
+ inline static bool EQ(const GrResourceEntry& a, const GrResourceEntry& b);
+
+private:
+ enum {
+ kCacheIDKeyOffset = 0,
+ kCacheIDDomainOffset = kCacheIDKeyOffset + sizeof(GrCacheID::Key),
+ kResourceTypeOffset = kCacheIDDomainOffset + sizeof(GrCacheID::Domain),
+ kResourceFlagsOffset = kResourceTypeOffset + sizeof(ResourceType),
+ kPadOffset = kResourceFlagsOffset + sizeof(ResourceFlags),
+ kKeySize = SkAlign4(kPadOffset),
+ kPadSize = kKeySize - kPadOffset
+ };
+
+ void init(const GrCacheID::Domain domain,
+ const GrCacheID::Key& key,
+ ResourceType type,
+ ResourceFlags flags) {
+ union {
+ uint8_t fKey8[kKeySize];
+ uint32_t fKey32[kKeySize / 4];
+ } keyData;
+
+ uint8_t* k = keyData.fKey8;
+ memcpy(k + kCacheIDKeyOffset, key.fData8, sizeof(GrCacheID::Key));
+ memcpy(k + kCacheIDDomainOffset, &domain, sizeof(GrCacheID::Domain));
+ memcpy(k + kResourceTypeOffset, &type, sizeof(ResourceType));
+ memcpy(k + kResourceFlagsOffset, &flags, sizeof(ResourceFlags));
+ memset(k + kPadOffset, 0, kPadSize);
+ fKey.fHashedKey.setKeyData(keyData.fKey32);
}
- uint32_t fP[4];
+ struct Key;
+ typedef GrTBinHashKey<Key, kKeySize> HashedKey;
- // this is computed from the fP... fields
- int fHashIndex;
+    // Adapter that lets GrResourceKey itself be hashed by GrTBinHashKey.
+    // compare() must order by the hashed key bytes; note it takes the
+    // *argument* key, not our own (comparing fHashedKey against itself and
+    // falling off the end without a return was undefined behavior and made
+    // every key compare as equal).
+    struct Key {
+        int compare(const HashedKey& hashedKey) const {
+            // Delegate to GrTBinHashKey::compare (memcmp over the key bytes).
+            return fHashedKey.compare(hashedKey);
+        }
+        HashedKey fHashedKey;
+    };
- friend class GrContext;
+ Key fKey;
};
-
///////////////////////////////////////////////////////////////////////////////
class GrResourceEntry {
friend class GrDLinkedList;
};
+bool GrResourceKey::LT(const GrResourceEntry& entry, const GrResourceKey& key) {
+ return LT(entry.key(), key);
+}
+
+bool GrResourceKey::EQ(const GrResourceEntry& entry, const GrResourceKey& key) {
+ return EQ(entry.key(), key);
+}
+
+bool GrResourceKey::LT(const GrResourceEntry& a, const GrResourceEntry& b) {
+ return LT(a.key(), b.key());
+}
+
+bool GrResourceKey::EQ(const GrResourceEntry& a, const GrResourceEntry& b) {
+ return EQ(a.key(), b.key());
+}
+
///////////////////////////////////////////////////////////////////////////////
#include "GrTHashCache.h"
void removeInvalidResource(GrResourceEntry* entry);
- class Key;
- GrTHashTable<GrResourceEntry, Key, 8> fCache;
+ GrTHashTable<GrResourceEntry, GrResourceKey, 8> fCache;
// We're an internal doubly linked list
typedef SkTInternalLList<GrResourceEntry> EntryList;
#include "GrResourceCache.h"
SK_DEFINE_INST_COUNT(GrStencilBuffer)
-GR_DEFINE_RESOURCE_CACHE_TYPE(GrStencilBuffer)
void GrStencilBuffer::transferToCache() {
GrAssert(NULL == this->getCacheEntry());
}
namespace {
-// we should never have more than one stencil buffer with same combo of
-// (width,height,samplecount)
-void gen_stencil_key_values(int width,
- int height,
- int sampleCnt,
- GrCacheID* cacheID) {
- cacheID->fPublicID = GrCacheID::kDefaultPublicCacheID;
- cacheID->fResourceSpecific32 = width | (height << 16);
- cacheID->fDomain = GrCacheData::kScratch_ResourceDomain;
-
- GrAssert(sampleCnt >= 0 && sampleCnt < 256);
- cacheID->fResourceSpecific16 = sampleCnt << 8;
-
- // last 8 bits of 'fResourceSpecific16' is free for flags
+// we should never have more than one stencil buffer with same combo of (width,height,samplecount)
+void gen_cache_id(int width, int height, int sampleCnt, GrCacheID* cacheID) {
+ static const GrCacheID::Domain gStencilBufferDomain = GrCacheID::GenerateDomain();
+ GrCacheID::Key key;
+ uint32_t* keyData = key.fData32;
+ keyData[0] = width;
+ keyData[1] = height;
+ keyData[2] = sampleCnt;
+ GR_STATIC_ASSERT(sizeof(key) >= 3 * sizeof(uint32_t));
+ cacheID->reset(gStencilBufferDomain, key);
}
}
GrResourceKey GrStencilBuffer::ComputeKey(int width,
int height,
int sampleCnt) {
- GrCacheID id(GrStencilBuffer::GetResourceType());
- gen_stencil_key_values(width, height, sampleCnt, &id);
-
- uint32_t v[4];
- id.toRaw(v);
- return GrResourceKey(v);
+ // All SBs are created internally to attach to RTs so they all use the same domain.
+ static const GrResourceKey::ResourceType gStencilBufferResourceType =
+ GrResourceKey::GenerateResourceType();
+ GrCacheID id;
+ gen_cache_id(width, height, sampleCnt, &id);
+
+ // we don't use any flags for SBs currently.
+ return GrResourceKey(id, gStencilBufferResourceType, 0);
}
#include "GrClipData.h"
#include "GrResource.h"
-#include "GrCacheID.h"
class GrRenderTarget;
class GrResourceEntry;
class GrStencilBuffer : public GrResource {
public:
SK_DECLARE_INST_COUNT(GrStencilBuffer);
- GR_DECLARE_RESOURCE_CACHE_TYPE()
virtual ~GrStencilBuffer() {
// TODO: allow SB to be purged and detach itself from rts
#if GR_DEBUG
template <typename T, typename Key, size_t kHashBits>
void GrTHashTable<T, Key, kHashBits>::validate() const {
- for (size_t i = 0; i < GR_ARRAY_COUNT(fHash); i++) {
- if (fHash[i]) {
- unsigned hashIndex = hash2Index(Key::GetHash(*fHash[i]));
- GrAssert(hashIndex == i);
- }
- }
-
int count = fSorted.count();
for (int i = 1; i < count; i++) {
GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) ||
#include "GrResourceCache.h"
SK_DEFINE_INST_COUNT(GrTexture)
-GR_DEFINE_RESOURCE_CACHE_TYPE(GrTexture)
/**
* This method allows us to interrupt the normal deletion process and place
}
}
-// These flags need to fit in <= 8 bits so they can be folded into the texture
+// These flags need to fit in a GrResourceKey::ResourceFlags so they can be folded into the texture
// key
-enum TextureBits {
- /*
- * The kNPOT bit is set when the texture is NPOT and is being repeated
- * but the hardware doesn't support that feature.
+enum TextureFlags {
+ /**
+ * The kStretchToPOT bit is set when the texture is NPOT and is being repeated but the
+ * hardware doesn't support that feature.
*/
- kNPOT_TextureBit = 0x1,
- /*
- * The kFilter bit can only be set when the kNPOT flag is set and indicates
- * whether the resizing of the texture should use filtering. This is
- * to handle cases where the original texture is indexed to disable
- * filtering.
+ kStretchToPOT_TextureFlag = 0x1,
+ /**
+ * The kFilter bit can only be set when the kStretchToPOT flag is set and indicates whether the
+ * stretched texture should be bilerp filtered or point sampled.
*/
- kFilter_TextureBit = 0x2,
- /*
- * The kScratch bit is set if the texture is being used as a scratch
- * texture.
- */
- kScratch_TextureBit = 0x4,
+ kFilter_TextureFlag = 0x2,
};
namespace {
-void gen_texture_key_values(const GrGpu* gpu,
- const GrTextureParams* params,
- const GrTextureDesc& desc,
- const GrCacheData& cacheData,
- bool scratch,
- GrCacheID* cacheID) {
-
- uint64_t clientKey = cacheData.fClientCacheID;
-
- if (scratch) {
- // Instead of a client-provided key of the texture contents
- // we create a key from the descriptor.
- GrAssert(GrCacheData::kScratch_CacheID == clientKey);
- clientKey = (desc.fFlags << 8) | ((uint64_t) desc.fConfig << 32);
- }
-
- cacheID->fPublicID = clientKey;
- cacheID->fDomain = cacheData.fResourceDomain;
-
- // we assume we only need 16 bits of width and height
- // assert that texture creation will fail anyway if this assumption
- // would cause key collisions.
- GrAssert(gpu->getCaps().maxTextureSize() <= SK_MaxU16);
- cacheID->fResourceSpecific32 = desc.fWidth | (desc.fHeight << 16);
-
- GrAssert(desc.fSampleCnt >= 0 && desc.fSampleCnt < 256);
- cacheID->fResourceSpecific16 = desc.fSampleCnt << 8;
-
- if (!gpu->getCaps().npotTextureTileSupport()) {
- bool isPow2 = GrIsPow2(desc.fWidth) && GrIsPow2(desc.fHeight);
-
- bool tiled = NULL != params && params->isTiled();
-
- if (tiled && !isPow2) {
- cacheID->fResourceSpecific16 |= kNPOT_TextureBit;
+GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
+ const GrTextureParams* params,
+ const GrTextureDesc& desc) {
+ GrResourceKey::ResourceFlags flags = 0;
+ bool tiled = NULL != params && params->isTiled();
+ if (tiled && !gpu->getCaps().npotTextureTileSupport()) {
+ if (!GrIsPow2(desc.fWidth) || !GrIsPow2(desc.fHeight)) {
+ flags |= kStretchToPOT_TextureFlag;
if (params->isBilerp()) {
- cacheID->fResourceSpecific16 |= kFilter_TextureBit;
+ flags |= kFilter_TextureFlag;
}
}
}
+ return flags;
+}
- if (scratch) {
- cacheID->fResourceSpecific16 |= kScratch_TextureBit;
- }
+GrResourceKey::ResourceType texture_resource_type() {
+ static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType();
+ return gType;
}
}
GrResourceKey GrTexture::ComputeKey(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
- const GrCacheData& cacheData,
- bool scratch) {
- GrCacheID id(GrTexture::GetResourceType());
- gen_texture_key_values(gpu, params, desc, cacheData, scratch, &id);
-
- uint32_t v[4];
- id.toRaw(v);
- return GrResourceKey(v);
+ const GrCacheID& cacheID) {
+ GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc);
+ return GrResourceKey(cacheID, texture_resource_type(), flags);
}
-bool GrTexture::NeedsResizing(const GrResourceKey& key) {
- return 0 != (key.getValue32(3) & kNPOT_TextureBit);
+GrResourceKey GrTexture::ComputeScratchKey(const GrTextureDesc& desc) {
+ GrCacheID::Key idKey;
+ // Instead of a client-provided key of the texture contents we create a key from the
+ // descriptor.
+ GR_STATIC_ASSERT(sizeof(idKey) >= 12);
+ GrAssert(desc.fHeight < (1 << 16));
+ GrAssert(desc.fWidth < (1 << 16));
+ idKey.fData32[0] = (desc.fWidth) | (desc.fHeight << 16);
+ idKey.fData32[1] = desc.fConfig | desc.fSampleCnt << 16;
+ idKey.fData32[2] = desc.fFlags;
+ static const int kPadSize = sizeof(idKey) - 12;
+ memset(idKey.fData8 + 12, 0, kPadSize);
+
+ GrCacheID cacheID(GrResourceKey::ScratchDomain(), idKey);
+ return GrResourceKey(cacheID, texture_resource_type(), 0);
}
-bool GrTexture::IsScratchTexture(const GrResourceKey& key) {
- return 0 != (key.getValue32(3) & kScratch_TextureBit);
+bool GrTexture::NeedsResizing(const GrResourceKey& key) {
+ return SkToBool(key.getResourceFlags() & kStretchToPOT_TextureFlag);
}
bool GrTexture::NeedsFiltering(const GrResourceKey& key) {
- return 0 != (key.getValue32(3) & kFilter_TextureBit);
+ return SkToBool(key.getResourceFlags() & kFilter_TextureFlag);
}
return false;
}
// if the entire texture is already in our cache then no reason to tile it
- if (this->isBitmapInTextureCache(bitmap, params)) {
+ if (GrIsBitmapInCache(fContext, bitmap, ¶ms)) {
return false;
}
///////////////////////////////////////////////////////////////////////////////
-bool SkGpuDevice::isBitmapInTextureCache(const SkBitmap& bitmap,
- const GrTextureParams& params) const {
- uint64_t key = bitmap.getGenerationID();
- key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
-
- GrTextureDesc desc;
- desc.fWidth = bitmap.width();
- desc.fHeight = bitmap.height();
- desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
-
- GrCacheData cacheData(key);
-
- return this->context()->isTextureInCache(desc, cacheData, ¶ms);
-}
-
-
SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
int width, int height,
bool isOpaque,
#if CACHE_COMPATIBLE_DEVICE_TEXTURES
// layers are never draw in repeat modes, so we can request an approx
// match and ignore any padding.
- GrContext::ScratchTexMatch matchType = (kSaveLayer_Usage == usage) ?
- GrContext::kApprox_ScratchTexMatch :
- GrContext::kExact_ScratchTexMatch;
- texture = fContext->lockScratchTexture(desc, matchType);
+ const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
+ GrContext::kApprox_ScratchTexMatch :
+ GrContext::kExact_ScratchTexMatch;
+ texture = fContext->lockScratchTexture(desc, match);
#else
tunref.reset(fContext->createUncachedTexture(desc, NULL, 0));
texture = tunref.get();
////////////////////////////////////////////////////////////////////////////////
+void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) {
+ // Our id includes the offset, width, and height so that bitmaps created by extractSubset()
+ // are unique.
+ uint32_t genID = bitmap.getGenerationID();
+ size_t offset = bitmap.pixelRefOffset();
+ int16_t width = static_cast<int16_t>(bitmap.width());
+ int16_t height = static_cast<int16_t>(bitmap.height());
+
+ GrCacheID::Key key;
+ memcpy(key.fData8, &genID, 4);
+ memcpy(key.fData8 + 4, &width, 2);
+ memcpy(key.fData8 + 6, &height, 2);
+ memcpy(key.fData8 + 8, &offset, sizeof(size_t));
+ GR_STATIC_ASSERT(sizeof(key) >= 8 + sizeof(size_t));
+ static const GrCacheID::Domain gBitmapTextureDomain = GrCacheID::GenerateDomain();
+ id->reset(gBitmapTextureDomain, key);
+}
+
+void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) {
+ desc->fFlags = kNone_GrTextureFlags;
+ desc->fWidth = bitmap.width();
+ desc->fHeight = bitmap.height();
+ desc->fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
+ desc->fSampleCnt = 0;
+}
+
static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
- uint64_t key,
+ bool cache,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
SkAutoLockPixels alp(origBitmap);
const SkBitmap* bitmap = &origBitmap;
GrTextureDesc desc;
- desc.fWidth = bitmap->width();
- desc.fHeight = bitmap->height();
- desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
-
- GrCacheData cacheData(key);
+ generate_bitmap_texture_desc(*bitmap, &desc);
if (SkBitmap::kIndex8_Config == bitmap->config()) {
// build_compressed_data doesn't do npot->pot expansion
// our compressed data will be trimmed, so pass width() for its
// "rowBytes", since they are the same now.
- if (GrCacheData::kScratch_CacheID != key) {
- return ctx->createTexture(params, desc, cacheData,
+ if (cache) {
+ GrCacheID cacheID;
+ generate_bitmap_cache_id(origBitmap, &cacheID);
+ return ctx->createTexture(params, desc, cacheID,
storage.get(),
bitmap->width());
} else {
GrTexture* result = ctx->lockScratchTexture(desc,
- GrContext::kExact_ScratchTexMatch);
+ GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0, bitmap->width(),
bitmap->height(), desc.fConfig,
storage.get());
return result;
}
-
} else {
origBitmap.copyTo(&tmpBitmap, SkBitmap::kARGB_8888_Config);
// now bitmap points to our temp, which has been promoted to 32bits
bitmap = &tmpBitmap;
+ desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
}
}
- desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
- if (GrCacheData::kScratch_CacheID != key) {
+ if (cache) {
// This texture is likely to be used again so leave it in the cache
- // but locked.
- return ctx->createTexture(params, desc, cacheData,
+ GrCacheID cacheID;
+ generate_bitmap_cache_id(origBitmap, &cacheID);
+ return ctx->createTexture(params, desc, cacheID,
bitmap->getPixels(),
bitmap->rowBytes());
} else {
// cache so no one else can find it. Additionally, once unlocked, the
// scratch texture will go to the end of the list for purging so will
// likely be available for this volatile bitmap the next time around.
- GrTexture* result = ctx->lockScratchTexture(desc,
- GrContext::kExact_ScratchTexMatch);
+ GrTexture* result = ctx->lockScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0,
bitmap->width(), bitmap->height(),
desc.fConfig,
}
}
-///////////////////////////////////////////////////////////////////////////////
+bool GrIsBitmapInCache(const GrContext* ctx,
+ const SkBitmap& bitmap,
+ const GrTextureParams* params) {
+ GrCacheID cacheID;
+ generate_bitmap_cache_id(bitmap, &cacheID);
+
+ GrTextureDesc desc;
+ generate_bitmap_texture_desc(bitmap, &desc);
+ return ctx->isTextureInCache(desc, cacheID, params);
+}
GrTexture* GrLockCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrTexture* result = NULL;
- if (!bitmap.isVolatile()) {
- // If the bitmap isn't changing try to find a cached copy first
- uint64_t key = bitmap.getGenerationID();
- key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
+ bool cache = !bitmap.isVolatile();
- GrTextureDesc desc;
- desc.fWidth = bitmap.width();
- desc.fHeight = bitmap.height();
- desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
+ if (cache) {
+ // If the bitmap isn't changing try to find a cached copy first.
- GrCacheData cacheData(key);
+ GrCacheID cacheID;
+ generate_bitmap_cache_id(bitmap, &cacheID);
- result = ctx->findTexture(desc, cacheData, params);
- if (NULL == result) {
- // didn't find a cached copy so create one
- result = sk_gr_create_bitmap_texture(ctx, key, params, bitmap);
- }
- } else {
- result = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap);
+ GrTextureDesc desc;
+ generate_bitmap_texture_desc(bitmap, &desc);
+
+ result = ctx->findTexture(desc, cacheID, params);
+ }
+ if (NULL == result) {
+ result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
}
if (NULL == result) {
GrPrintf("---- failed to create texture for cache [%d %d]\n",
#define VALIDATE
#endif
-GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrTextureStripAtlas, GetTextureStripAtlasDomain)
-
-
int32_t GrTextureStripAtlas::gCacheCount = 0;
GrTHashTable<GrTextureStripAtlas::AtlasEntry,
}
GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
- : fCacheID(sk_atomic_inc(&gCacheCount))
+ : fCacheKey(sk_atomic_inc(&gCacheCount))
, fLockedRows(0)
, fDesc(desc)
, fNumRows(desc.fHeight / desc.fRowHeight)
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
texDesc.fConfig = fDesc.fConfig;
- GrCacheData cacheData(fCacheID);
- cacheData.fResourceDomain = GetTextureStripAtlasDomain();
- fTexture = fDesc.fContext->findTexture(texDesc, cacheData, ¶ms);
+
+ static const GrCacheID::Domain gTextureStripAtlasDomain = GrCacheID::GenerateDomain();
+ GrCacheID::Key key;
+ *key.fData32 = fCacheKey;
+ memset(key.fData32 + 1, 0, sizeof(key) - sizeof(uint32_t));
+ GrCacheID cacheID(gTextureStripAtlasDomain, key);
+
+ fTexture = fDesc.fContext->findTexture(texDesc, cacheID, ¶ms);
if (NULL == fTexture) {
- fTexture = fDesc.fContext->createTexture(¶ms, texDesc, cacheData, NULL, 0);
+ fTexture = fDesc.fContext->createTexture(¶ms, texDesc, cacheID, NULL, 0);
// This is a new texture, so all of our cache info is now invalid
this->initLRU();
fKeyTable.rewind();
*/
class GrTextureStripAtlas {
public:
- GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetTextureStripAtlasDomain)
-
/**
* Descriptor struct which we'll use as a hash table key
**/
// A unique ID for this texture (formed with: gCacheCount++), so we can be sure that if we
// get a texture back from the texture cache, that it's the same one we last used.
- const uint64_t fCacheID;
+ const int32_t fCacheKey;
// Total locks on all rows (when this reaches zero, we can unlock our texture)
int32_t fLockedRows;