From: reed
Date: Wed, 8 Oct 2014 12:17:12 +0000 (-0700)
Subject: Add SkCachedData and use it for SkMipMap
X-Git-Tag: submit/tizen/20180928.044319~5539
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9d93c2ebb31bf996905532446644f242339a774e;p=platform%2Fupstream%2FlibSkiaSharp.git

Add SkCachedData and use it for SkMipMap

This reverts commit 37c5a815d8ea33247968212ef4cc83394ceee1bc.

TBR=mtklein

Review URL: https://codereview.chromium.org/635333002
---

diff --git a/gyp/core.gypi b/gyp/core.gypi
index fb10f32192..7c419ed4b4 100644
--- a/gyp/core.gypi
+++ b/gyp/core.gypi
@@ -42,6 +42,7 @@
         '<(skia_src_path)/core/SkBlitter_RGB16.cpp',
         '<(skia_src_path)/core/SkBlitter_Sprite.cpp',
         '<(skia_src_path)/core/SkBuffer.cpp',
+        '<(skia_src_path)/core/SkCachedData.cpp',
         '<(skia_src_path)/core/SkCanvas.cpp',
         '<(skia_src_path)/core/SkChunkAlloc.cpp',
         '<(skia_src_path)/core/SkClipStack.cpp',
diff --git a/gyp/tests.gypi b/gyp/tests.gypi
index 1d38c488e0..db87f3f7a4 100644
--- a/gyp/tests.gypi
+++ b/gyp/tests.gypi
@@ -58,6 +58,7 @@
     '../tests/BlendTest.cpp',
     '../tests/BlitRowTest.cpp',
     '../tests/BlurTest.cpp',
+    '../tests/CachedDataTest.cpp',
    '../tests/CachedDecodingPixelRefTest.cpp',
    '../tests/CanvasStateHelpers.cpp',
    '../tests/CanvasStateTest.cpp',
diff --git a/src/core/SkBitmapCache.cpp b/src/core/SkBitmapCache.cpp
index 5044d30518..13ec5aa961 100644
--- a/src/core/SkBitmapCache.cpp
+++ b/src/core/SkBitmapCache.cpp
@@ -71,7 +71,7 @@ struct BitmapRec : public SkResourceCache::Rec {
 };

 #define CHECK_LOCAL(localCache, localName, globalName, ...)    \
-    (localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__)
+    ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))

 bool SkBitmapCache::Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
                          SkBitmap* result, SkResourceCache* localCache) {
@@ -125,41 +125,58 @@ bool SkBitmapCache::Add(uint32_t genID, const SkIRect& subset, const SkBitmap& r

 struct MipMapRec : public SkResourceCache::Rec {
     MipMapRec(const SkBitmap& src, const SkMipMap* result)
         : fKey(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src))
-        , fMipMap(SkRef(result))
-    {}
+        , fMipMap(result)
+    {
+        fMipMap->attachToCacheAndRef();
+    }

     virtual ~MipMapRec() {
-        fMipMap->unref();
+        fMipMap->detachFromCacheAndUnref();
     }

-    BitmapKey fKey;
-    const SkMipMap* fMipMap;
-
     virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
-    virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->getSize(); }
+    virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->size(); }

     static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextMip) {
         const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
-        const SkMipMap** result = (const SkMipMap**)contextMip;
-
-        *result = SkRef(rec.fMipMap);
-        // mipmaps don't use the custom allocator yet, so we don't need to check pixels
+        const SkMipMap* mm = SkRef(rec.fMipMap);
+        // the call to ref() above triggers a "lock" in the case of discardable memory,
+        // which means we can now check for null (in case the lock failed).
+        if (NULL == mm->data()) {
+            mm->unref();    // balance our call to ref()
+            return false;
+        }
+        // the caller must call unref() when they are done.
+        *(const SkMipMap**)contextMip = mm;
         return true;
     }
+
+private:
+    BitmapKey       fKey;
+    const SkMipMap* fMipMap;
 };

-const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src) {
+const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src, SkResourceCache* localCache) {
     BitmapKey key(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src));
     const SkMipMap* result;
-    if (!SkResourceCache::Find(key, MipMapRec::Visitor, &result)) {
+
+    if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Visitor, &result)) {
         result = NULL;
     }
     return result;
 }

-void SkMipMapCache::Add(const SkBitmap& src, const SkMipMap* result) {
-    if (result) {
-        SkResourceCache::Add(SkNEW_ARGS(MipMapRec, (src, result)));
+static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
+    return localCache ? localCache->GetDiscardableFactory()
+                      : SkResourceCache::GetDiscardableFactory();
+}
+
+const SkMipMap* SkMipMapCache::AddAndRef(const SkBitmap& src, SkResourceCache* localCache) {
+    SkMipMap* mipmap = SkMipMap::Build(src, get_fact(localCache));
+    if (mipmap) {
+        MipMapRec* rec = SkNEW_ARGS(MipMapRec, (src, mipmap));
+        CHECK_LOCAL(localCache, add, Add, rec);
     }
+    return mipmap;
 }
diff --git a/src/core/SkBitmapCache.h b/src/core/SkBitmapCache.h
index 181c85812a..ec8a6b803e 100644
--- a/src/core/SkBitmapCache.h
+++ b/src/core/SkBitmapCache.h
@@ -52,8 +52,8 @@ public:

 class SkMipMapCache {
 public:
-    static const SkMipMap* FindAndRef(const SkBitmap& src);
-    static void Add(const SkBitmap& src, const SkMipMap* result);
+    static const SkMipMap* FindAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
+    static const SkMipMap* AddAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
 };

 #endif
diff --git a/src/core/SkBitmapProcState.cpp b/src/core/SkBitmapProcState.cpp
index 6c1dc30449..2a449d6d8f 100644
--- a/src/core/SkBitmapProcState.cpp
+++ b/src/core/SkBitmapProcState.cpp
@@ -260,11 +260,14 @@ bool SkBitmapProcState::possiblyScaleImage() {
     if (scaleSqd > SK_Scalar1) {
         fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap));
         if (NULL == fCurrMip.get()) {
-            fCurrMip.reset(SkMipMap::Build(fOrigBitmap));
+            fCurrMip.reset(SkMipMapCache::AddAndRef(fOrigBitmap));
             if (NULL == fCurrMip.get()) {
                 return false;
             }
-            SkMipMapCache::Add(fOrigBitmap, fCurrMip);
+        }
+        // diagnostic for a crasher...
+        if (NULL == fCurrMip->data()) {
+            sk_throw();
         }

         SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
@@ -280,6 +283,9 @@ bool SkBitmapProcState::possiblyScaleImage() {
             fBitmap = &fScaledBitmap;
             fFilterLevel = SkPaint::kLow_FilterLevel;
             return true;
+        } else {
+            // failed to extract, so release the mipmap
+            fCurrMip.reset(NULL);
         }
     }
diff --git a/src/core/SkCachedData.cpp b/src/core/SkCachedData.cpp
new file mode 100644
index 0000000000..f1fb026ba8
--- /dev/null
+++ b/src/core/SkCachedData.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCachedData.h"
+#include "SkRefCnt.h"
+#include "SkDiscardableMemory.h"
+
+//#define TRACK_CACHEDDATA_LIFETIME
+
+#ifdef TRACK_CACHEDDATA_LIFETIME
+static int32_t gCachedDataCounter;
+
+static void inc() {
+    int32_t oldCount = sk_atomic_inc(&gCachedDataCounter);
+    SkDebugf("SkCachedData inc %d\n", oldCount + 1);
+}
+
+static void dec() {
+    int32_t oldCount = sk_atomic_dec(&gCachedDataCounter);
+    SkDebugf("SkCachedData dec %d\n", oldCount - 1);
+}
+#else
+static void inc() {}
+static void dec() {}
+#endif
+
+SkCachedData::SkCachedData(void* data, size_t size)
+    : fData(data)
+    , fSize(size)
+    , fRefCnt(1)
+    , fStorageType(kMalloc_StorageType)
+    , fInCache(false)
+    , fIsLocked(true)
+{
+    fStorage.fMalloc = data;
+    inc();
+}
+
+SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
+    : fData(dm->data())
+    , fSize(size)
+    , fRefCnt(1)
+    , fStorageType(kDiscardableMemory_StorageType)
+    , fInCache(false)
+    , fIsLocked(true)
+{
+    fStorage.fDM = dm;
+    inc();
+}
+
+SkCachedData::~SkCachedData() {
+    switch (fStorageType) {
+        case kMalloc_StorageType:
+            sk_free(fStorage.fMalloc);
+            break;
+        case kDiscardableMemory_StorageType:
+            SkDELETE(fStorage.fDM);
+            break;
+    }
+    dec();
+}
+
+class SkCachedData::AutoMutexWritable {
+public:
+    AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
+        fCD->fMutex.acquire();
+        fCD->validate();
+    }
+    ~AutoMutexWritable() {
+        fCD->validate();
+        fCD->fMutex.release();
+    }
+
+    SkCachedData* get() { return fCD; }
+    SkCachedData* operator->() { return fCD; }
+
+private:
+    SkCachedData* fCD;
+};
+
+void SkCachedData::internalRef(bool fromCache) const {
+    AutoMutexWritable(this)->inMutexRef(fromCache);
+}
+
+void SkCachedData::internalUnref(bool fromCache) const {
+    if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
+        // can't delete inside doInternalUnref, since it is locking a mutex (which we own)
+        SkDELETE(this);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkCachedData::inMutexRef(bool fromCache) {
+    if ((1 == fRefCnt) && fInCache) {
+        this->inMutexLock();
+    }
+
+    fRefCnt += 1;
+    if (fromCache) {
+        SkASSERT(!fInCache);
+        fInCache = true;
+    }
+}
+
+bool SkCachedData::inMutexUnref(bool fromCache) {
+    switch (--fRefCnt) {
+        case 0:
+            // we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
+            if (fIsLocked) {
+                this->inMutexUnlock();
+            }
+            break;
+        case 1:
+            if (fInCache && !fromCache) {
+                // If we're down to 1 owner, and that owner is the cache, then it is safe
+                // to unlock (and mutate fData) even if the cache is in a different thread,
+                // as the cache is NOT allowed to inspect or use fData.
+                this->inMutexUnlock();
+            }
+            break;
+        default:
+            break;
+    }
+
+    if (fromCache) {
+        SkASSERT(fInCache);
+        fInCache = false;
+    }
+
+    // return true when we need to be deleted
+    return 0 == fRefCnt;
+}
+
+void SkCachedData::inMutexLock() {
+    fMutex.assertHeld();
+
+    SkASSERT(!fIsLocked);
+    fIsLocked = true;
+
+    switch (fStorageType) {
+        case kMalloc_StorageType:
+            this->setData(fStorage.fMalloc);
+            break;
+        case kDiscardableMemory_StorageType:
+            if (fStorage.fDM->lock()) {
+                void* ptr = fStorage.fDM->data();
+                SkASSERT(ptr);
+                this->setData(ptr);
+            } else {
+                this->setData(NULL);    // signal failure to lock, contents are gone
+            }
+            break;
+    }
+}
+
+void SkCachedData::inMutexUnlock() {
+    fMutex.assertHeld();
+
+    SkASSERT(fIsLocked);
+    fIsLocked = false;
+
+    switch (fStorageType) {
+        case kMalloc_StorageType:
+            // nothing to do/check
+            break;
+        case kDiscardableMemory_StorageType:
+            if (fData) {    // did the previous lock succeed?
+                fStorage.fDM->unlock();
+            }
+            break;
+    }
+    this->setData(NULL);    // signal that we're in an unlocked state
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkCachedData::validate() const {
+    if (fIsLocked) {
+        SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
+        switch (fStorageType) {
+            case kMalloc_StorageType:
+                SkASSERT(fData == fStorage.fMalloc);
+                break;
+            case kDiscardableMemory_StorageType:
+                // fData can be null or the actual value, depending if DM's lock succeeded
+                break;
+        }
+    } else {
+        SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
+        SkASSERT(NULL == fData);
+    }
+}
+#endif
diff --git a/src/core/SkCachedData.h b/src/core/SkCachedData.h
new file mode 100644
index 0000000000..886ca3e7e4
--- /dev/null
+++ b/src/core/SkCachedData.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCachedData_DEFINED
+#define SkCachedData_DEFINED
+
+#include "SkThread.h"
+
+class SkDiscardableMemory;
+
+class SkCachedData : ::SkNoncopyable {
+public:
+    SkCachedData(void* mallocData, size_t size);
+    SkCachedData(size_t size, SkDiscardableMemory*);
+    virtual ~SkCachedData();
+
+    size_t size() const { return fSize; }
+    const void* data() const { return fData; }
+
+    void* writable_data() { return fData; }
+
+    void ref() const { this->internalRef(false); }
+    void unref() const { this->internalUnref(false); }
+
+    int  testing_only_getRefCnt() const { return fRefCnt; }
+    bool testing_only_isLocked() const { return fIsLocked; }
+    bool testing_only_isInCache() const { return fInCache; }
+
+protected:
+    // called when fData changes. could be NULL.
+    virtual void onDataChange(void* oldData, void* newData) {}
+
+private:
+    SkMutex fMutex;     // could use a pool of these...
+        fLevels = (Level*)newData;  // could be NULL
+    }

 private:
-    size_t  fSize;
     Level*  fLevels;
     int     fCount;

     // we take ownership of levels, and will free it with sk_free()
-    SkMipMap(Level* levels, int count, size_t size);
-    virtual ~SkMipMap();
+    SkMipMap(void* malloc, size_t size) : INHERITED(malloc, size) {}
+    SkMipMap(size_t size, SkDiscardableMemory* dm) : INHERITED(size, dm) {}
+
+    static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
-    static Level* AllocLevels(int levelCount, size_t pixelSize);
+
+    typedef SkCachedData INHERITED;
 };

 #endif
diff --git a/src/core/SkResourceCache.cpp b/src/core/SkResourceCache.cpp
index 3098a9a2a1..f7a810ec87 100644
--- a/src/core/SkResourceCache.cpp
+++ b/src/core/SkResourceCache.cpp
@@ -201,6 +201,18 @@ bool SkResourceCache::find(const Key& key, VisitorProc visitor, void* context) {
     return false;
 }

+static void make_size_str(size_t size, SkString* str) {
+    const char suffix[] = { 'b', 'k', 'm', 'g', 't', 0 };
+    int i = 0;
+    while (suffix[i] && (size > 1024)) {
+        i += 1;
+        size >>= 10;
+    }
+    str->printf("%zu%c", size, suffix[i]);
+}
+
+static bool gDumpCacheTransactions;
+
 void SkResourceCache::add(Rec* rec) {
     SkASSERT(rec);
     // See if we already have this key (racy inserts, etc.)
@@ -213,6 +225,14 @@ void SkResourceCache::add(Rec* rec) {
     this->addToHead(rec);
     fHash->add(rec);
+    if (gDumpCacheTransactions) {
+        SkString bytesStr, totalStr;
+        make_size_str(rec->bytesUsed(), &bytesStr);
+        make_size_str(fTotalBytesUsed, &totalStr);
+        SkDebugf("RC: add %5s %12p key %08x -- total %5s, count %d\n",
+                 bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+    }
+
     // since the new rec may push us over-budget, we perform a purge check now
     this->purgeAsNeeded();
 }
@@ -224,10 +244,18 @@ void SkResourceCache::remove(Rec* rec) {
     this->detach(rec);
     fHash->remove(rec->getKey());

-    SkDELETE(rec);
-
     fTotalBytesUsed -= used;
     fCount -= 1;
+
+    if (gDumpCacheTransactions) {
+        SkString bytesStr, totalStr;
+        make_size_str(used, &bytesStr);
+        make_size_str(fTotalBytesUsed, &totalStr);
+        SkDebugf("RC: remove %5s %12p key %08x -- total %5s, count %d\n",
+                 bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+    }
+
+    SkDELETE(rec);
 }

 void SkResourceCache::purgeAsNeeded(bool forcePurge) {
diff --git a/src/core/SkResourceCache.h b/src/core/SkResourceCache.h
index 52196985d3..665c0be498 100644
--- a/src/core/SkResourceCache.h
+++ b/src/core/SkResourceCache.h
@@ -10,6 +10,7 @@

 #include "SkBitmap.h"

+class SkCachedData;
 class SkDiscardableMemory;
 class SkMipMap;

@@ -197,6 +198,8 @@ public:
     DiscardableFactory discardableFactory() const { return fDiscardableFactory; }
     SkBitmap::Allocator* allocator() const { return fAllocator; };

+    SkCachedData* newCachedData(size_t bytes);
+
     /**
      *  Call SkDebugf() with diagnostic information about the state of the cache
      */
diff --git a/tests/CachedDataTest.cpp b/tests/CachedDataTest.cpp
new file mode 100644
index 0000000000..f65a46b770
--- /dev/null
+++ b/tests/CachedDataTest.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCachedData.h"
+#include "SkDiscardableMemoryPool.h"
+#include "Test.h"
+
+enum LockedState {
+    kUnlocked,
+    kLocked,
+};
+
+enum CachedState {
+    kNotInCache,
+    kInCache,
+};
+
+static void check_data(skiatest::Reporter* reporter, SkCachedData* data,
+                       int refcnt, CachedState cacheState, LockedState lockedState) {
+    REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
+    REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
+    REPORTER_ASSERT(reporter, data->testing_only_isLocked() == (lockedState == kLocked));
+}
+
+static SkCachedData* make_data(size_t size, SkDiscardableMemoryPool* pool) {
+    if (pool) {
+        SkDiscardableMemory* dm = pool->create(size);
+        // the pool "can" return null, but it shouldn't in these controlled conditions
+        SK_ALWAYSBREAK(dm);
+        return SkNEW_ARGS(SkCachedData, (size, dm));
+    } else {
+        return SkNEW_ARGS(SkCachedData, (sk_malloc_throw(size), size));
+    }
+}
+
+// returns with the data locked by client and cache
+static SkCachedData* test_locking(skiatest::Reporter* reporter,
+                                  size_t size, SkDiscardableMemoryPool* pool) {
+    SkCachedData* data = make_data(size, pool);
+
+    memset(data->writable_data(), 0x80, size);  // just to use writable_data()
+
+    check_data(reporter, data, 1, kNotInCache, kLocked);
+
+    data->ref();
+    check_data(reporter, data, 2, kNotInCache, kLocked);
+    data->unref();
+    check_data(reporter, data, 1, kNotInCache, kLocked);
+
+    data->attachToCacheAndRef();
+    check_data(reporter, data, 2, kInCache, kLocked);
+
+    data->unref();
+    check_data(reporter, data, 1, kInCache, kUnlocked);
+
+    data->ref();
+    check_data(reporter, data, 2, kInCache, kLocked);
+
+    return data;
+}
+
+/*
+ *  SkCachedData behaves differently (regarding its locked/unlocked state) depending on
+ *  whether it is in the cache or not. Being in the cache is signaled by calling
+ *  attachToCacheAndRef() instead of ref() (and balanced by detachFromCacheAndUnref()).
+ *
+ *  Thus, among other things, we test the end-of-life behavior when the client is the last owner
+ *  and when the cache is.
+ */
+DEF_TEST(CachedData, reporter) {
+    SkAutoTUnref<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Create(1000));
+
+    for (int useDiscardable = 0; useDiscardable <= 1; ++useDiscardable) {
+        const size_t size = 100;
+
+        // test with client as last owner
+        SkCachedData* data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
+        check_data(reporter, data, 2, kInCache, kLocked);
+        data->detachFromCacheAndUnref();
+        check_data(reporter, data, 1, kNotInCache, kLocked);
+        data->unref();
+
+        // test with cache as last owner
+        data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
+        check_data(reporter, data, 2, kInCache, kLocked);
+        data->unref();
+        check_data(reporter, data, 1, kInCache, kUnlocked);
+        data->detachFromCacheAndUnref();
+    }
+}
+
diff --git a/tests/MipMapTest.cpp b/tests/MipMapTest.cpp
index 33f467244b..c8396effa6 100644
--- a/tests/MipMapTest.cpp
+++ b/tests/MipMapTest.cpp
@@ -25,7 +25,7 @@ DEF_TEST(MipMap, reporter) {
     for (int i = 0; i < 500; ++i) {
         make_bitmap(&bm, rand);

-        SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm));
+        SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm, NULL));

         REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1, NULL));
         REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1 * 2, NULL));
diff --git a/tests/SkResourceCacheTest.cpp b/tests/SkResourceCacheTest.cpp
index f13476a5a3..0e941758ee 100644
--- a/tests/SkResourceCacheTest.cpp
+++ b/tests/SkResourceCacheTest.cpp
@@ -121,6 +121,55 @@ DEF_TEST(BitmapCache_add_rect, reporter) {
     REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
 }

+#include "SkMipMap.h"
+
+enum LockedState {
+    kNotLocked,
+    kLocked,
+};
+
+enum CachedState {
+    kNotInCache,
+    kInCache,
+};
+
+static void check_data(skiatest::Reporter* reporter, const SkCachedData* data,
+                       int refcnt, CachedState cacheState, LockedState lockedState) {
+    REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
+    REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
+    bool isLocked = (data->data() != NULL);
+    REPORTER_ASSERT(reporter, isLocked == (lockedState == kLocked));
+}
+
+static void test_mipmapcache(skiatest::Reporter* reporter, SkResourceCache* cache) {
+    cache->purgeAll();
+
+    SkBitmap src;
+    src.allocN32Pixels(5, 5);
+    src.setImmutable();
+
+    const SkMipMap* mipmap = SkMipMapCache::FindAndRef(src, cache);
+    REPORTER_ASSERT(reporter, NULL == mipmap);
+
+    mipmap = SkMipMapCache::AddAndRef(src, cache);
+    REPORTER_ASSERT(reporter, mipmap);
+    check_data(reporter, mipmap, 2, kInCache, kLocked);
+
+    mipmap->unref();
+    // tricky, since technically after this I'm no longer an owner, but since the cache is
+    // local, I know it won't get purged behind my back
+    check_data(reporter, mipmap, 1, kInCache, kNotLocked);
+
+    // find us again
+    mipmap = SkMipMapCache::FindAndRef(src, cache);
+    check_data(reporter, mipmap, 2, kInCache, kLocked);
+
+    cache->purgeAll();
+    check_data(reporter, mipmap, 1, kNotInCache, kLocked);
+
+    mipmap->unref();
+}
+
 DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
     SkResourceCache::DiscardableFactory factory = SkResourceCache::GetDiscardableFactory();
     SkBitmap::Allocator* allocator = SkBitmapCache::GetAllocator();
@@ -165,4 +214,6 @@ DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
     // We can add the bitmap back to the cache and find it again.
     REPORTER_ASSERT(reporter, SkBitmapCache::Add(cachedBitmap.getGenerationID(), rect, cachedBitmap, cache));
     REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
+
+    test_mipmapcache(reporter, cache);
 }
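
Usage sketch (illustrative, not part of the commit above). The helper below shows the
caller-side pattern this change establishes: try SkMipMapCache::FindAndRef(), fall back to
SkMipMapCache::AddAndRef(), and check data() before touching the levels, mirroring
SkBitmapProcState::possiblyScaleImage() and the tests. The function name
find_or_build_mipmap and the flat include paths are assumptions for the sketch, not code
from the commit.

    #include "SkBitmap.h"
    #include "SkBitmapCache.h"   // declares SkMipMapCache
    #include "SkMipMap.h"

    static const SkMipMap* find_or_build_mipmap(const SkBitmap& src) {
        // Look for an existing entry; FindAndRef() returns it already ref'ed, which also
        // re-locks the underlying SkCachedData when it is discardable-backed.
        const SkMipMap* mm = SkMipMapCache::FindAndRef(src);
        if (NULL == mm) {
            // Build with the cache's discardable factory (if any) and add it to the cache.
            // AddAndRef() hands back the new entry with the caller's ref already taken.
            mm = SkMipMapCache::AddAndRef(src);
            if (NULL == mm) {
                return NULL;    // build failed (e.g. unsupported colortype)
            }
        }
        // With discardable backing the lock can fail; data() is NULL in that case.
        if (NULL == mm->data()) {
            mm->unref();
            return NULL;
        }
        return mm;  // caller must unref() when finished with the levels
    }

Because SkMipMap now derives from SkCachedData, the cache can unlock the discardable
storage as soon as it is the only remaining owner, freeing it to be purged; a client ref
taken through FindAndRef() re-locks it, and the data() check above covers the case where
that lock fails to recover the memory.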