'<(skia_src_path)/core/SkBlitter_RGB16.cpp',
'<(skia_src_path)/core/SkBlitter_Sprite.cpp',
'<(skia_src_path)/core/SkBuffer.cpp',
+ '<(skia_src_path)/core/SkCachedData.cpp',
'<(skia_src_path)/core/SkCanvas.cpp',
'<(skia_src_path)/core/SkChunkAlloc.cpp',
'<(skia_src_path)/core/SkClipStack.cpp',
'../tests/BlendTest.cpp',
'../tests/BlitRowTest.cpp',
'../tests/BlurTest.cpp',
+ '../tests/CachedDataTest.cpp',
'../tests/CachedDecodingPixelRefTest.cpp',
'../tests/CanvasStateHelpers.cpp',
'../tests/CanvasStateTest.cpp',
};
#define CHECK_LOCAL(localCache, localName, globalName, ...) \
- (localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__)
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
bool SkBitmapCache::Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY, SkBitmap* result,
SkResourceCache* localCache) {
struct MipMapRec : public SkResourceCache::Rec {
MipMapRec(const SkBitmap& src, const SkMipMap* result)
: fKey(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src))
- , fMipMap(SkRef(result))
- {}
+ , fMipMap(result)
+ {
+ fMipMap->attachToCacheAndRef();
+ }
virtual ~MipMapRec() {
- fMipMap->unref();
+ fMipMap->detachFromCacheAndUnref();
}
- BitmapKey fKey;
- const SkMipMap* fMipMap;
-
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
- virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->getSize(); }
+ virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->size(); }
static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextMip) {
const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
// mipmaps don't use the custom allocator yet, so we don't need to check pixels
return true;
}
+
+private:
+ BitmapKey fKey;
+ const SkMipMap* fMipMap;
};
-const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src) {
+const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src, SkResourceCache* localCache) {
BitmapKey key(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src));
const SkMipMap* result;
- if (!SkResourceCache::Find(key, MipMapRec::Visitor, &result)) {
+
+ if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Visitor, &result)) {
result = NULL;
}
return result;
}
-void SkMipMapCache::Add(const SkBitmap& src, const SkMipMap* result) {
- if (result) {
- SkResourceCache::Add(SkNEW_ARGS(MipMapRec, (src, result)));
+static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
+ return localCache ? localCache->GetDiscardableFactory()
+ : SkResourceCache::GetDiscardableFactory();
+}
+
+const SkMipMap* SkMipMapCache::AddAndRef(const SkBitmap& src, SkResourceCache* localCache) {
+ SkMipMap* mipmap = SkMipMap::Build(src, get_fact(localCache));
+ if (mipmap) {
+ MipMapRec* rec = SkNEW_ARGS(MipMapRec, (src, mipmap));
+ CHECK_LOCAL(localCache, add, Add, rec);
}
+ return mipmap;
}
class SkMipMapCache {
public:
- static const SkMipMap* FindAndRef(const SkBitmap& src);
- static void Add(const SkBitmap& src, const SkMipMap* result);
+ static const SkMipMap* FindAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
+ static const SkMipMap* AddAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
};
#endif
if (scaleSqd > SK_Scalar1) {
fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap));
if (NULL == fCurrMip.get()) {
- fCurrMip.reset(SkMipMap::Build(fOrigBitmap));
+ fCurrMip.reset(SkMipMapCache::AddAndRef(fOrigBitmap));
if (NULL == fCurrMip.get()) {
return false;
}
- SkMipMapCache::Add(fOrigBitmap, fCurrMip);
}
SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
--- /dev/null
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCachedData.h"
+#include "SkRefCnt.h"
+#include "SkDiscardableMemory.h"
+
+SkCachedData::SkCachedData(void* data, size_t size)
+ : fData(data)
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kMalloc_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fMalloc = data;
+}
+
+SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
+ : fData(dm->data())
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kDiscardableMemory_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fDM = dm;
+}
+
+SkCachedData::~SkCachedData() {
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ sk_free(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ SkDELETE(fStorage.fDM);
+ break;
+ }
+}
+
+class SkCachedData::AutoMutexWritable {
+public:
+ AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
+ fCD->fMutex.acquire();
+ fCD->validate();
+ }
+ ~AutoMutexWritable() {
+ fCD->validate();
+ fCD->fMutex.release();
+ }
+
+ SkCachedData* get() { return fCD; }
+ SkCachedData* operator->() { return fCD; }
+
+private:
+ SkCachedData* fCD;
+};
+
+void SkCachedData::internalRef(bool fromCache) const {
+ AutoMutexWritable(this)->inMutexRef(fromCache);
+}
+
+void SkCachedData::internalUnref(bool fromCache) const {
+ if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
+        // can't delete inside inMutexUnref, since the mutex is still held there (which we own)
+ SkDELETE(this);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkCachedData::inMutexRef(bool fromCache) {
+ if ((1 == fRefCnt) && fInCache) {
+ this->inMutexLock();
+ }
+
+ fRefCnt += 1;
+ if (fromCache) {
+ SkASSERT(!fInCache);
+ fInCache = true;
+ }
+}
+
+bool SkCachedData::inMutexUnref(bool fromCache) {
+ switch (--fRefCnt) {
+ case 0:
+ // we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
+ if (fIsLocked) {
+ this->inMutexUnlock();
+ }
+ break;
+ case 1:
+ if (fInCache && !fromCache) {
+                // If we're down to 1 owner, and that owner is the cache, then it is safe
+ // to unlock (and mutate fData) even if the cache is in a different thread,
+ // as the cache is NOT allowed to inspect or use fData.
+ this->inMutexUnlock();
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (fromCache) {
+ SkASSERT(fInCache);
+ fInCache = false;
+ }
+
+ // return true when we need to be deleted
+ return 0 == fRefCnt;
+}
+
+void SkCachedData::inMutexLock() {
+ fMutex.assertHeld();
+
+ SkASSERT(!fIsLocked);
+ fIsLocked = true;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ this->setData(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fStorage.fDM->lock()) {
+ this->setData(fStorage.fDM->data());
+ } else {
+ this->setData(NULL); // signal failure to lock, contents are gone
+ }
+ break;
+ }
+}
+
+void SkCachedData::inMutexUnlock() {
+ fMutex.assertHeld();
+
+ SkASSERT(fIsLocked);
+ fIsLocked = false;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ // nothing to do/check
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fData) { // did the previous lock succeed?
+ fStorage.fDM->unlock();
+ }
+ break;
+ }
+ this->setData(NULL); // signal that we're in an unlocked state
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkCachedData::validate() const {
+ if (fIsLocked) {
+ SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ SkASSERT(fData == fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ // fData can be null or the actual value, depending if DM's lock succeeded
+ break;
+ }
+ } else {
+ SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
+ SkASSERT(NULL == fData);
+ }
+}
+#endif
--- /dev/null
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCachedData_DEFINED
+#define SkCachedData_DEFINED
+
+#include "SkThread.h"
+
+class SkDiscardableMemory;
+
+class SkCachedData : ::SkNoncopyable {
+public:
+ SkCachedData(void* mallocData, size_t size);
+ SkCachedData(size_t size, SkDiscardableMemory*);
+ virtual ~SkCachedData();
+
+ size_t size() const { return fSize; }
+ const void* data() const { return fData; }
+
+ void* writable_data() { return fData; }
+
+ void ref() const { this->internalRef(false); }
+ void unref() const { this->internalUnref(false); }
+
+ int testing_only_getRefCnt() const { return fRefCnt; }
+ bool testing_only_isLocked() const { return fIsLocked; }
+ bool testing_only_isInCache() const { return fInCache; }
+
+protected:
+ // called when fData changes. could be NULL.
+ virtual void onDataChange(void* oldData, void* newData) {}
+
+private:
+ SkMutex fMutex; // could use a pool of these...
+
+ enum StorageType {
+ kDiscardableMemory_StorageType,
+ kMalloc_StorageType
+ };
+
+ union {
+ SkDiscardableMemory* fDM;
+ void* fMalloc;
+ } fStorage;
+ void* fData;
+ size_t fSize;
+ int fRefCnt; // low-bit means we're owned by the cache
+ StorageType fStorageType;
+ bool fInCache;
+ bool fIsLocked;
+
+ void internalRef(bool fromCache) const;
+ void internalUnref(bool fromCache) const;
+
+ void inMutexRef(bool fromCache);
+ bool inMutexUnref(bool fromCache); // returns true if we should delete "this"
+ void inMutexLock();
+ void inMutexUnlock();
+
+ // called whenever our fData might change (lock or unlock)
+ void setData(void* newData) {
+ if (newData != fData) {
+ // notify our subclasses of the change
+ this->onDataChange(fData, newData);
+ fData = newData;
+ }
+ }
+
+ class AutoMutexWritable;
+
+public:
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ /*
+     *  Attaching data to a SkResourceCache (only one at a time) enables the data to be
+ * unlocked when the cache is the only owner, thus freeing it to be purged (assuming the
+ * data is backed by a SkDiscardableMemory).
+ *
+ * When attached, it also automatically attempts to "lock" the data when the first client
+ * ref's the data (typically from a find(key, visitor) call).
+ *
+ * Thus the data will always be "locked" when a non-cache has a ref on it (whether or not
+ * the lock succeeded to recover the memory -- check data() to see if it is NULL).
+ */
+
+ /*
+ * Call when adding this instance to a SkResourceCache::Rec subclass
+ * (typically in the Rec's constructor).
+ */
+ void attachToCacheAndRef() const { this->internalRef(true); }
+
+ /*
+ * Call when removing this instance from a SkResourceCache::Rec subclass
+ * (typically in the Rec's destructor).
+ */
+ void detachFromCacheAndUnref() const { this->internalUnref(true); }
+};
+
+#endif
*dst->getAddr16(x >> 1, y >> 1) = (uint16_t)collaps4444(c >> 2);
}
-SkMipMap::Level* SkMipMap::AllocLevels(int levelCount, size_t pixelSize) {
+size_t SkMipMap::AllocLevelsSize(int levelCount, size_t pixelSize) {
if (levelCount < 0) {
- return NULL;
+ return 0;
}
int64_t size = sk_64_mul(levelCount + 1, sizeof(Level)) + pixelSize;
if (!sk_64_isS32(size)) {
- return NULL;
+ return 0;
}
- return (Level*)sk_malloc_throw(sk_64_asS32(size));
+ return sk_64_asS32(size);
}
-SkMipMap* SkMipMap::Build(const SkBitmap& src) {
+SkMipMap* SkMipMap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
void (*proc)(SkBitmap* dst, int x, int y, const SkBitmap& src);
const SkColorType ct = src.colorType();
return NULL;
}
- Level* levels = SkMipMap::AllocLevels(countLevels, size);
- if (NULL == levels) {
+ size_t storageSize = SkMipMap::AllocLevelsSize(countLevels, size);
+ if (0 == storageSize) {
return NULL;
}
+ SkMipMap* mipmap;
+ if (fact) {
+ SkDiscardableMemory* dm = fact(storageSize);
+ if (NULL == dm) {
+ return NULL;
+ }
+ mipmap = SkNEW_ARGS(SkMipMap, (storageSize, dm));
+ } else {
+ mipmap = SkNEW_ARGS(SkMipMap, (sk_malloc_throw(storageSize), storageSize));
+ }
+
+ // init
+ mipmap->fCount = countLevels;
+ mipmap->fLevels = (Level*)mipmap->writable_data();
+
+ Level* levels = mipmap->fLevels;
uint8_t* baseAddr = (uint8_t*)&levels[countLevels];
uint8_t* addr = baseAddr;
int width = src.width();
}
SkASSERT(addr == baseAddr + size);
- return SkNEW_ARGS(SkMipMap, (levels, countLevels, size));
+ return mipmap;
}
///////////////////////////////////////////////////////////////////////////////
//static int gCounter;
-SkMipMap::SkMipMap(Level* levels, int count, size_t size)
- : fSize(size), fLevels(levels), fCount(count) {
- SkASSERT(levels);
- SkASSERT(count > 0);
-// SkDebugf("mips %d\n", ++gCounter);
-}
-
-SkMipMap::~SkMipMap() {
- sk_free(fLevels);
-// SkDebugf("mips %d\n", --gCounter);
-}
-
static SkFixed compute_level(SkScalar scale) {
SkFixed s = SkAbs32(SkScalarToFixed(SkScalarInvert(scale)));
}
bool SkMipMap::extractLevel(SkScalar scale, Level* levelPtr) const {
+ if (NULL == fLevels) {
+ return false;
+ }
+
if (scale >= SK_Scalar1) {
return false;
}
#ifndef SkMipMap_DEFINED
#define SkMipMap_DEFINED
-#include "SkRefCnt.h"
+#include "SkCachedData.h"
#include "SkScalar.h"
class SkBitmap;
+class SkDiscardableMemory;
-class SkMipMap : public SkRefCnt {
+typedef SkDiscardableMemory* (*SkDiscardableFactoryProc)(size_t bytes);
+
+class SkMipMap : public SkCachedData {
public:
- static SkMipMap* Build(const SkBitmap& src);
+ static SkMipMap* Build(const SkBitmap& src, SkDiscardableFactoryProc);
struct Level {
void* fPixels;
bool extractLevel(SkScalar scale, Level*) const;
- size_t getSize() const { return fSize; }
+protected:
+ virtual void onDataChange(void* oldData, void* newData) SK_OVERRIDE {
+ fLevels = (Level*)newData; // could be NULL
+ }
private:
- size_t fSize;
Level* fLevels;
int fCount;
// we take ownership of levels, and will free it with sk_free()
- SkMipMap(Level* levels, int count, size_t size);
- virtual ~SkMipMap();
+ SkMipMap(void* malloc, size_t size) : INHERITED(malloc, size) {}
+ SkMipMap(size_t size, SkDiscardableMemory* dm) : INHERITED(size, dm) {}
+
+ static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
- static Level* AllocLevels(int levelCount, size_t pixelSize);
+ typedef SkCachedData INHERITED;
};
#endif
* found in the LICENSE file.
*/
+#include "SkCachedData.h"
#include "SkChecksum.h"
#include "SkResourceCache.h"
#include "SkMipMap.h"
return prevLimit;
}
+SkCachedData* SkResourceCache::newCachedData(size_t bytes) {
+ if (fDiscardableFactory) {
+ SkDiscardableMemory* dm = fDiscardableFactory(bytes);
+ return dm ? SkNEW_ARGS(SkCachedData, (bytes, dm)) : NULL;
+ } else {
+ return SkNEW_ARGS(SkCachedData, (sk_malloc_throw(bytes), bytes));
+ }
+}
+
///////////////////////////////////////////////////////////////////////////////
void SkResourceCache::detach(Rec* rec) {
return get_cache()->allocator();
}
+SkCachedData* SkResourceCache::NewCachedData(size_t bytes) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->newCachedData(bytes);
+}
+
void SkResourceCache::Dump() {
SkAutoMutexAcquire am(gMutex);
get_cache()->dump();
#include "SkBitmap.h"
+class SkCachedData;
class SkDiscardableMemory;
class SkMipMap;
*/
static SkBitmap::Allocator* GetAllocator();
+ static SkCachedData* NewCachedData(size_t bytes);
+
/**
* Call SkDebugf() with diagnostic information about the state of the cache
*/
DiscardableFactory discardableFactory() const { return fDiscardableFactory; }
SkBitmap::Allocator* allocator() const { return fAllocator; };
+ SkCachedData* newCachedData(size_t bytes);
+
/**
* Call SkDebugf() with diagnostic information about the state of the cache
*/
--- /dev/null
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCachedData.h"
+#include "SkDiscardableMemoryPool.h"
+#include "Test.h"
+
+enum LockedState {
+ kUnlocked,
+ kLocked,
+};
+
+enum CachedState {
+ kNotInCache,
+ kInCache,
+};
+
+static void check_data(skiatest::Reporter* reporter, SkCachedData* data,
+ int refcnt, CachedState cacheState, LockedState lockedState) {
+ REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
+ REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
+ REPORTER_ASSERT(reporter, data->testing_only_isLocked() == (lockedState == kLocked));
+}
+
+static SkCachedData* make_data(size_t size, SkDiscardableMemoryPool* pool) {
+ if (pool) {
+ SkDiscardableMemory* dm = pool->create(size);
+ // the pool "can" return null, but it shouldn't in these controlled conditions
+ SK_ALWAYSBREAK(dm);
+ return SkNEW_ARGS(SkCachedData, (size, dm));
+ } else {
+ return SkNEW_ARGS(SkCachedData, (sk_malloc_throw(size), size));
+ }
+}
+
+// returns with the data locked by client and cache
+static SkCachedData* test_locking(skiatest::Reporter* reporter,
+ size_t size, SkDiscardableMemoryPool* pool) {
+ SkCachedData* data = make_data(size, pool);
+
+ memset(data->writable_data(), 0x80, size); // just to use writable_data()
+
+ check_data(reporter, data, 1, kNotInCache, kLocked);
+
+ data->ref();
+ check_data(reporter, data, 2, kNotInCache, kLocked);
+ data->unref();
+ check_data(reporter, data, 1, kNotInCache, kLocked);
+
+ data->attachToCacheAndRef();
+ check_data(reporter, data, 2, kInCache, kLocked);
+
+ data->unref();
+ check_data(reporter, data, 1, kInCache, kUnlocked);
+
+ data->ref();
+ check_data(reporter, data, 2, kInCache, kLocked);
+
+ return data;
+}
+
+/*
+ * SkCachedData behaves differently (regarding its locked/unlocked state) depending on
+ * when it is in the cache or not. Being in the cache is signaled by calling attachToCacheAndRef()
+ * instead of ref(). (and balanced by detachFromCacheAndUnref).
+ *
+ * Thus, among other things, we test the end-of-life behavior when the client is the last owner
+ * and when the cache is.
+ */
+DEF_TEST(CachedData, reporter) {
+ SkAutoTUnref<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Create(1000));
+
+ for (int useDiscardable = 0; useDiscardable <= 1; ++useDiscardable) {
+ const size_t size = 100;
+
+ // test with client as last owner
+ SkCachedData* data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
+ check_data(reporter, data, 2, kInCache, kLocked);
+ data->detachFromCacheAndUnref();
+ check_data(reporter, data, 1, kNotInCache, kLocked);
+ data->unref();
+
+ // test with cache as last owner
+ data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
+ check_data(reporter, data, 2, kInCache, kLocked);
+ data->unref();
+ check_data(reporter, data, 1, kInCache, kUnlocked);
+ data->detachFromCacheAndUnref();
+ }
+}
+
for (int i = 0; i < 500; ++i) {
make_bitmap(&bm, rand);
- SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm));
+ SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm, NULL));
REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1, NULL));
REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1 * 2, NULL));
REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
}
+#include "SkMipMap.h"
+
+enum LockedState {
+ kNotLocked,
+ kLocked,
+};
+
+enum CachedState {
+ kNotInCache,
+ kInCache,
+};
+
+static void check_data(skiatest::Reporter* reporter, const SkCachedData* data,
+ int refcnt, CachedState cacheState, LockedState lockedState) {
+ REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
+ REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
+ bool isLocked = (data->data() != NULL);
+ REPORTER_ASSERT(reporter, isLocked == (lockedState == kLocked));
+}
+
+static void test_mipmapcache(skiatest::Reporter* reporter, SkResourceCache* cache) {
+ cache->purgeAll();
+
+ SkBitmap src;
+ src.allocN32Pixels(5, 5);
+ src.setImmutable();
+
+ const SkMipMap* mipmap = SkMipMapCache::FindAndRef(src, cache);
+ REPORTER_ASSERT(reporter, NULL == mipmap);
+
+ mipmap = SkMipMapCache::AddAndRef(src, cache);
+ REPORTER_ASSERT(reporter, mipmap);
+ check_data(reporter, mipmap, 2, kInCache, kLocked);
+
+ mipmap->unref();
+ // tricky, since technically after this I'm no longer an owner, but since the cache is
+ // local, I know it won't get purged behind my back
+ check_data(reporter, mipmap, 1, kInCache, kNotLocked);
+
+ // find us again
+ mipmap = SkMipMapCache::FindAndRef(src, cache);
+ check_data(reporter, mipmap, 2, kInCache, kLocked);
+
+ cache->purgeAll();
+ check_data(reporter, mipmap, 1, kNotInCache, kLocked);
+
+ mipmap->unref();
+}
+
DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
SkResourceCache::DiscardableFactory factory = SkResourceCache::GetDiscardableFactory();
SkBitmap::Allocator* allocator = SkBitmapCache::GetAllocator();
// We can add the bitmap back to the cache and find it again.
REPORTER_ASSERT(reporter, SkBitmapCache::Add(cachedBitmap.getGenerationID(), rect, cachedBitmap, cache));
REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
+
+ test_mipmapcache(reporter, cache);
}