From: Herb Derby Date: Fri, 13 Jan 2017 16:30:44 +0000 (-0500) Subject: Introduce SkArenaAlloc - should be fast for POD types and RAII for types with dtors. X-Git-Tag: accepted/tizen/5.0/unified/20181102.025319~55^2~838 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0497f088bb41338b1b1400556b9b690decc846fa;p=platform%2Fupstream%2FlibSkiaSharp.git Introduce SkArenaAlloc - should be fast for POD types and RAII for types with dtors. - Implementation. - Use in SkLinearPipeline. TBR=mtklein@google.com Change-Id: Ie014184469b217132b0307b5a9ae40c0c60e5fc9 Reviewed-on: https://skia-review.googlesource.com/6921 Reviewed-by: Herb Derby Commit-Queue: Herb Derby --- diff --git a/bench/SkLinearBitmapPipelineBench.cpp b/bench/SkLinearBitmapPipelineBench.cpp index f34cb45..020ce7f 100644 --- a/bench/SkLinearBitmapPipelineBench.cpp +++ b/bench/SkLinearBitmapPipelineBench.cpp @@ -149,8 +149,7 @@ struct SkBitmapFPGeneral final : public CommonBitmapFPBenchmark { char storage[600]; - SkFixedAlloc fixedAlloc{storage, sizeof(storage)}; - SkFallbackAlloc allocator{&fixedAlloc}; + SkArenaAlloc allocator{storage, sizeof(storage), 512}; SkLinearBitmapPipeline pipeline{ fInvert, filterQuality, fXTile, fYTile, SK_ColorBLACK, srcPixmap, &allocator}; diff --git a/gm/SkLinearBitmapPipelineGM.cpp b/gm/SkLinearBitmapPipelineGM.cpp index 43e4a24..e881645 100644 --- a/gm/SkLinearBitmapPipelineGM.cpp +++ b/gm/SkLinearBitmapPipelineGM.cpp @@ -117,8 +117,7 @@ static void draw_rect_fp(SkCanvas* canvas, const SkRect& r, SkColor c, const SkM auto procN = SkXfermode::GetD32Proc(SkBlendMode::kSrcOver, flags); char storage[512]; - SkFixedAlloc fixedAlloc{storage, sizeof(storage)}; - SkFallbackAlloc allocator{&fixedAlloc}; + SkArenaAlloc allocator{storage, sizeof(storage)}; SkLinearBitmapPipeline pipeline{ inv, filterQuality, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode, diff --git a/include/private/SkTLogic.h b/include/private/SkTLogic.h index 00a5aad..ee72549 100644 --- a/include/private/SkTLogic.h +++ b/include/private/SkTLogic.h @@ -57,8 +57,10 @@ template using common_type_t = typename std::common_type:: template struct underlying_type { using type = __underlying_type(T); }; +template using is_trivially_destructible = std::has_trivial_destructor; #else template using underlying_type = std::underlying_type; +template using is_trivially_destructible = std::is_trivially_destructible; #endif template using underlying_type_t = typename skstd::underlying_type::type; diff --git a/src/core/SkBitmapProcShader.cpp b/src/core/SkBitmapProcShader.cpp index 4bb61f1..11deac4 100644 --- a/src/core/SkBitmapProcShader.cpp +++ b/src/core/SkBitmapProcShader.cpp @@ -171,8 +171,7 @@ public: private: char fStorage[512 + 96]; - SkFixedAlloc fFixedAlloc {fStorage, sizeof(fStorage)}; - SkFallbackAlloc fAllocator {&fFixedAlloc}; + SkArenaAlloc fAllocator {fStorage, sizeof(fStorage)}; SkLinearBitmapPipeline* fShaderPipeline; SkLinearBitmapPipeline* fBlitterPipeline; SkXfermode::D32Proc fSrcModeProc; diff --git a/src/core/SkFixedAlloc.cpp b/src/core/SkFixedAlloc.cpp index cb2f798..c6b0725 100644 --- a/src/core/SkFixedAlloc.cpp +++ b/src/core/SkFixedAlloc.cpp @@ -7,6 +7,8 @@ #include "SkFixedAlloc.h" +#include + SkFixedAlloc::SkFixedAlloc(void* ptr, size_t len) : fStorage((char*)ptr), fCursor(fStorage), fEnd(fStorage + len) {} @@ -49,3 +51,142 @@ void SkFallbackAlloc::reset() { } fFixedAlloc->reset(); } + +struct Skipper { + char* operator()(char* objEnd, ptrdiff_t size) { return objEnd + size; } +}; + +struct NextBlock { + char* 
operator()(char* objEnd, ptrdiff_t size) { delete [] objEnd; return objEnd + size; }
+};
+
+SkArenaAlloc::SkArenaAlloc(char* block, size_t size, size_t extraSize)
+    : fDtorCursor{block}
+    , fCursor    {block}
+    , fEnd       {block + size}
+    , fExtraSize {extraSize}
+{
+    if (size < sizeof(Footer)) {
+        fEnd = fCursor = fDtorCursor = nullptr;
+    }
+
+    if (fCursor != nullptr) {
+        this->installFooter(EndChain, 0);
+    }
+}
+
+SkArenaAlloc::~SkArenaAlloc() {
+    this->reset();
+}
+
+void SkArenaAlloc::reset() {
+    Footer f;
+    memmove(&f, fDtorCursor - sizeof(Footer), sizeof(Footer));
+    char* releaser = fDtorCursor;
+    while (releaser != nullptr) {
+        releaser = this->callFooterAction(releaser);
+    }
+}
+
+void SkArenaAlloc::installFooter(FooterAction* releaser, ptrdiff_t padding) {
+    ptrdiff_t releaserDiff = (char *)releaser - (char *)EndChain;
+    ptrdiff_t footerData = SkLeftShift((int64_t)releaserDiff, 5) | padding;
+    if (padding >= 32 || !SkTFitsIn<Footer>(footerData)) {
+        // Footer data will not fit.
+        SkFAIL("Constraints are busted.");
+    }
+
+    Footer footer = (Footer)(footerData);
+    memmove(fCursor, &footer, sizeof(Footer));
+    Footer check;
+    memmove(&check, fCursor, sizeof(Footer));
+    fCursor += sizeof(Footer);
+    fDtorCursor = fCursor;
+}
+
+void SkArenaAlloc::ensureSpace(size_t size, size_t alignment) {
+    constexpr size_t headerSize = sizeof(Footer) + sizeof(ptrdiff_t);
+    // The chrome c++ library we use does not define std::max_align_t.
+    // This must be conservative to add the right amount of extra memory to handle the alignment
+    // padding.
+    constexpr size_t alignof_max_align_t = 8;
+    auto objSizeAndOverhead = size + headerSize + sizeof(Footer);
+    if (alignment > alignof_max_align_t) {
+        objSizeAndOverhead += alignment - 1;
+    }
+
+    auto allocationSize = std::max(objSizeAndOverhead, fExtraSize);
+
+    // Round up to a nice size. If > 32K align to 4K boundary else up to max_align_t. The > 32K
+    // heuristic is from the JEMalloc behavior.
+    {
+        size_t mask = allocationSize > (1 << 15) ? (1 << 12) - 1 : 32 - 1;
+        allocationSize = (allocationSize + mask) & ~mask;
+    }
+
+    char* newBlock = new char[allocationSize];
+
+    auto previousDtor = fDtorCursor;
+    fCursor = newBlock;
+    fDtorCursor = newBlock;
+    fEnd = fCursor + allocationSize;
+    this->installIntFooter<NextBlock>(previousDtor - fCursor, 0);
+}
+
+char* SkArenaAlloc::allocObject(size_t size, size_t alignment) {
+    size_t mask = alignment - 1;
+    char* objStart = (char*)((uintptr_t)(fCursor + mask) & ~mask);
+    if (objStart + size > fEnd) {
+        this->ensureSpace(size, alignment);
+        objStart = (char*)((uintptr_t)(fCursor + mask) & ~mask);
+    }
+    return objStart;
+}
+
+// * sizeIncludingFooter - the size of the object plus the memory needed for its footer.
+// * alignment           - alignment needed by the object.
+char* SkArenaAlloc::allocObjectWithFooter(size_t sizeIncludingFooter, size_t alignment) {
+    size_t mask = alignment - 1;
+
+restart:
+    size_t skipOverhead = 0;
+    bool needsSkipFooter = fCursor != fDtorCursor;
+    if (needsSkipFooter) {
+        size_t skipSize = SkTFitsIn<int32_t>(fDtorCursor - fCursor)
+                        ? sizeof(int32_t)
+                        : sizeof(ptrdiff_t);
+        skipOverhead = sizeof(Footer) + skipSize;
+    }
+    char* objStart = (char*)((uintptr_t)(fCursor + skipOverhead + mask) & ~mask);
+    size_t totalSize = sizeIncludingFooter + skipOverhead;
+
+    if (objStart + totalSize > fEnd) {
+        this->ensureSpace(totalSize, alignment);
+        goto restart;
+    }
+
+    SkASSERT(objStart + totalSize <= fEnd);
+
+    // Install a skip footer if needed, thus terminating a run of POD data. The calling code is
+    // responsible for installing the footer after the object.
+    if (needsSkipFooter) {
+        this->installIntFooter<Skipper>(fDtorCursor - fCursor, 0);
+    }
+
+    return objStart;
+}
+
+char* SkArenaAlloc::callFooterAction(char* end) {
+    Footer footer;
+    memcpy(&footer, end - sizeof(Footer), sizeof(Footer));
+
+    FooterAction* releaser = (FooterAction*)((char*)EndChain + (footer >> 5));
+    ptrdiff_t padding = footer & 31;
+
+    char* r = releaser(end) - padding;
+
+    return r;
+}
+
+char* SkArenaAlloc::EndChain(char*) { return nullptr; }
+
diff --git a/src/core/SkFixedAlloc.h b/src/core/SkFixedAlloc.h
index f969986..acb6895 100644
--- a/src/core/SkFixedAlloc.h
+++ b/src/core/SkFixedAlloc.h
@@ -10,7 +10,9 @@
 #include "SkTFitsIn.h"
 #include "SkTypes.h"
+#include 
 #include 
+#include 
 #include 
 #include 
 
@@ -96,13 +98,14 @@ public:
         return new (ptr) T(std::forward<Args>(args)...);
     }
 
-    // Destroys the last object allocated and frees any space it used in the SkFixedAlloc.
-    void undo();
-
     // Destroys all objects and frees all space in the SkFixedAlloc.
     void reset();
 
 private:
+    // Destroys the last object allocated and frees any space it used in the SkFixedAlloc.
+    void undo();
+
     struct HeapAlloc {
         void (*deleter)(char*);
         char* ptr;
@@ -112,4 +115,189 @@ private:
     std::vector<HeapAlloc> fHeapAllocs;
 };
 
+// SkArenaAlloc allocates objects and destroys the allocated objects when destroyed. It's designed
+// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
+// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
+// starting with an allocation of extraSize bytes. If your data (plus a small overhead) fits in
+// the user-provided block, SkArenaAlloc never uses the heap, and if it fits in extraSize bytes,
+// it'll use the heap only once. If you pass extraSize = 0, it allocates blocks for each call to
+// make.
+//
+// Examples:
+//
+//   char block[mostCasesSize];
+//   SkArenaAlloc arena(block, almostAllCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+//   std::unique_ptr<char[]> block{new char[mostCasesSize]};
+//   SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following.
+//
+//   SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+//   class Foo {
+//       char storage[mostCasesSize];
+//       SkArenaAlloc arena (storage, almostAllCasesSize);
+//   };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4 bytes.
+// For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There is an
+// additional overhead when switching from POD data to non-POD data of typically 8 bytes.
+class SkArenaAlloc {
+public:
+    SkArenaAlloc(char* block, size_t size, size_t extraSize = 0);
+
+    template <size_t kSize>
+    SkArenaAlloc(char (&block)[kSize], size_t extraSize = 0)
+        : SkArenaAlloc(block, kSize, extraSize)
+    {}
+
+    ~SkArenaAlloc();
+
+    template <typename T, typename... Args>
+    T* make(Args&&... args) {
+        char* objStart;
+        if (skstd::is_trivially_destructible<T>::value) {
+            objStart = this->allocObject(sizeof(T), alignof(T));
+            fCursor = objStart + sizeof(T);
+        } else {
+            objStart = this->allocObjectWithFooter(sizeof(T) + sizeof(Footer), alignof(T));
+            size_t padding = objStart - fCursor;
+
+            // Advance to end of object to install footer.
+            fCursor = objStart + sizeof(T);
+            FooterAction* releaser = [](char* objEnd) {
+                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+                ((T*)objStart)->~T();
+                return objStart;
+            };
+            this->installFooter(releaser, padding);
+        }
+
+        // This must be last to make objects with nested use of this allocator work.
+        return new(objStart) T(std::forward<Args>(args)...);
+    }
+
+    template <typename T>
+    T* makeArrayDefault(size_t count) {
+        T* array = (T*)this->commonArrayAlloc<T>(count);
+
+        // If T is primitive then no initialization takes place.
+        for (size_t i = 0; i < count; i++) {
+            new (&array[i]) T;
+        }
+        return array;
+    }
+
+    template <typename T>
+    T* makeArray(size_t count) {
+        T* array = (T*)this->commonArrayAlloc<T>(count);
+
+        // If T is primitive then the memory is initialized. For example, an array of chars will
+        // be zeroed.
+        for (size_t i = 0; i < count; i++) {
+            new (&array[i]) T();
+        }
+        return array;
+    }
+
+    // Destroy all allocated objects, free any heap allocations.
+    void reset();
+
+private:
+    using Footer = int32_t;
+    using FooterAction = char* (char*);
+
+    void installFooter(FooterAction* releaser, ptrdiff_t padding);
+
+    // N.B. Action is different than FooterAction. FooterAction expects the end of the Footer,
+    // and returns the start of the object. An Action expects the end of the *Object* and returns
+    // the start of the object.
+    template <typename Action>
+    void installIntFooter(ptrdiff_t size, ptrdiff_t padding) {
+        if (SkTFitsIn<int32_t>(size)) {
+            int32_t smallSize = static_cast<int32_t>(size);
+            memmove(fCursor, &smallSize, sizeof(int32_t));
+            fCursor += sizeof(int32_t);
+            this->installFooter(
+                [](char* footerEnd) {
+                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(int32_t));
+                    int32_t data;
+                    memmove(&data, objEnd, sizeof(int32_t));
+                    return Action()(objEnd, data);
+                },
+                padding);
+        } else {
+            memmove(fCursor, &size, sizeof(ptrdiff_t));
+            fCursor += sizeof(ptrdiff_t);
+            this->installFooter(
+                [](char* footerEnd) {
+                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(ptrdiff_t));
+                    ptrdiff_t data;
+                    memmove(&data, objEnd, sizeof(ptrdiff_t));
+                    return Action()(objEnd, data);
+                },
+                padding);
+        }
+    }
+
+    void ensureSpace(size_t size, size_t alignment);
+
+    char* allocObject(size_t size, size_t alignment);
+
+    char* allocObjectWithFooter(size_t sizeIncludingFooter, size_t alignment);
+
+    template <typename T>
+    char* commonArrayAlloc(size_t count) {
+        char* objStart;
+        size_t arraySize = count * sizeof(T);
+
+        SkASSERT(arraySize > 0);
+
+        if (skstd::is_trivially_destructible<T>::value) {
+            objStart = this->allocObject(arraySize, alignof(T));
+            fCursor = objStart + arraySize;
+        } else {
+            size_t countSize = SkTFitsIn<int32_t>(count) ? sizeof(int32_t) : sizeof(ptrdiff_t);
+            size_t totalSize = arraySize + sizeof(Footer) + countSize;
+            objStart = this->allocObjectWithFooter(totalSize, alignof(T));
+            size_t padding = objStart - fCursor;
+
+            // Advance to end of array to install footer.
+ fCursor = objStart + arraySize; + this->installIntFooter> (count, padding); + } + + return objStart; + } + + char* callFooterAction(char* end); + + static char* EndChain(char*); + + template + struct ArrayDestructor { + char* operator()(char* objEnd, ptrdiff_t count) { + char* objStart = objEnd - count * sizeof(T); + T* array = (T*) objStart; + for (int i = 0; i < count; i++) { + array[i].~T(); + } + return objStart; + } + }; + + char* fDtorCursor; + char* fCursor; + char* fEnd; + size_t fExtraSize; +}; + #endif//SkFixedAlloc_DEFINED diff --git a/src/core/SkLinearBitmapPipeline.cpp b/src/core/SkLinearBitmapPipeline.cpp index 9e8b419..1c3b7a5 100644 --- a/src/core/SkLinearBitmapPipeline.cpp +++ b/src/core/SkLinearBitmapPipeline.cpp @@ -351,7 +351,7 @@ SkLinearBitmapPipeline::SkLinearBitmapPipeline( SkShader::TileMode xTile, SkShader::TileMode yTile, SkColor paintColor, const SkPixmap& srcPixmap, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { SkISize dimensions = srcPixmap.info().dimensions(); const SkImageInfo& srcImageInfo = srcPixmap.info(); @@ -393,7 +393,7 @@ SkLinearBitmapPipeline::SkLinearBitmapPipeline( const SkPixmap& srcPixmap, SkBlendMode mode, const SkImageInfo& dstInfo, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { SkASSERT(mode == SkBlendMode::kSrc || mode == SkBlendMode::kSrcOver); SkASSERT(srcPixmap.info().colorType() == dstInfo.colorType() @@ -425,7 +425,7 @@ SkLinearBitmapPipeline* SkLinearBitmapPipeline::ClonePipelineForBlitting( float finalAlpha, SkBlendMode blendMode, const SkImageInfo& dstInfo, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { if (blendMode == SkBlendMode::kSrcOver && srcPixmap.info().alphaType() == kOpaque_SkAlphaType) { blendMode = SkBlendMode::kSrc; @@ -469,7 +469,7 @@ SkLinearBitmapPipeline::PointProcessorInterface* SkLinearBitmapPipeline::chooseMatrix( PointProcessorInterface* next, const SkMatrix& inverse, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { if (inverse.hasPerspective()) { auto matrixStage = allocator->make>( @@ -480,7 +480,7 @@ SkLinearBitmapPipeline::chooseMatrix( SkVector{inverse.getPerspX(), inverse.getPerspY()}, inverse.get(SkMatrix::kMPersp2)); fMatrixStageCloner = - [matrixStage](PointProcessorInterface* cloneNext, SkFallbackAlloc* memory) { + [matrixStage](PointProcessorInterface* cloneNext, SkArenaAlloc* memory) { return memory->make>(cloneNext, matrixStage); }; return matrixStage; @@ -491,7 +491,7 @@ SkLinearBitmapPipeline::chooseMatrix( SkVector{inverse.getScaleX(), inverse.getScaleY()}, SkVector{inverse.getSkewX(), inverse.getSkewY()}); fMatrixStageCloner = - [matrixStage](PointProcessorInterface* cloneNext, SkFallbackAlloc* memory) { + [matrixStage](PointProcessorInterface* cloneNext, SkArenaAlloc* memory) { return memory->make>(cloneNext, matrixStage); }; return matrixStage; @@ -501,7 +501,7 @@ SkLinearBitmapPipeline::chooseMatrix( SkVector{inverse.getTranslateX(), inverse.getTranslateY()}, SkVector{inverse.getScaleX(), inverse.getScaleY()}); fMatrixStageCloner = - [matrixStage](PointProcessorInterface* cloneNext, SkFallbackAlloc* memory) { + [matrixStage](PointProcessorInterface* cloneNext, SkArenaAlloc* memory) { return memory->make>(cloneNext, matrixStage); }; return matrixStage; @@ -510,12 +510,12 @@ SkLinearBitmapPipeline::chooseMatrix( next, SkVector{inverse.getTranslateX(), inverse.getTranslateY()}); fMatrixStageCloner = - [matrixStage](PointProcessorInterface* cloneNext, SkFallbackAlloc* memory) { + [matrixStage](PointProcessorInterface* cloneNext, SkArenaAlloc* 
memory) { return memory->make>(cloneNext, matrixStage); }; return matrixStage; } else { - fMatrixStageCloner = [](PointProcessorInterface* cloneNext, SkFallbackAlloc* memory) { + fMatrixStageCloner = [](PointProcessorInterface* cloneNext, SkArenaAlloc* memory) { return cloneNext; }; return next; @@ -526,12 +526,12 @@ template SkLinearBitmapPipeline::PointProcessorInterface* SkLinearBitmapPipeline::createTiler( SampleProcessorInterface* next, SkISize dimensions, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { auto tilerStage = allocator->make(next, dimensions); fTileStageCloner = [tilerStage](SampleProcessorInterface* cloneNext, - SkFallbackAlloc* memory) -> PointProcessorInterface* { + SkArenaAlloc* memory) -> PointProcessorInterface* { return memory->make(cloneNext, tilerStage); }; return tilerStage; @@ -542,7 +542,7 @@ SkLinearBitmapPipeline::PointProcessorInterface* SkLinearBitmapPipeline::chooseT SampleProcessorInterface* next, SkShader::TileMode yMode, SkISize dimensions, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { switch (yMode) { case SkShader::kClamp_TileMode: { @@ -571,7 +571,7 @@ SkLinearBitmapPipeline::PointProcessorInterface* SkLinearBitmapPipeline::chooseT SkShader::TileMode yMode, SkFilterQuality filterQuality, SkScalar dx, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { switch (xMode) { case SkShader::kClamp_TileMode: @@ -597,7 +597,7 @@ template SkLinearBitmapPipeline::PixelAccessorInterface* SkLinearBitmapPipeline::chooseSpecificAccessor( const SkPixmap& srcPixmap, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { if (srcPixmap.info().gammaCloseToSRGB()) { using Accessor = PixelAccessor; @@ -611,7 +611,7 @@ SkLinearBitmapPipeline::PixelAccessorInterface* SkLinearBitmapPipeline::PixelAccessorInterface* SkLinearBitmapPipeline::choosePixelAccessor( const SkPixmap& srcPixmap, const SkColor A8TintColor, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { const SkImageInfo& imageInfo = srcPixmap.info(); @@ -649,7 +649,7 @@ SkLinearBitmapPipeline::SampleProcessorInterface* SkLinearBitmapPipeline::choose SkShader::TileMode xTile, SkShader::TileMode yTile, const SkPixmap& srcPixmap, const SkColor A8TintColor, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { const SkImageInfo& imageInfo = srcPixmap.info(); SkISize dimensions = imageInfo.dimensions(); @@ -707,7 +707,7 @@ SkLinearBitmapPipeline::SampleProcessorInterface* SkLinearBitmapPipeline::choose Blender* SkLinearBitmapPipeline::chooseBlenderForShading( SkAlphaType alphaType, float postAlpha, - SkFallbackAlloc* allocator) + SkArenaAlloc* allocator) { if (alphaType == kUnpremul_SkAlphaType) { return allocator->make>(postAlpha); diff --git a/src/core/SkLinearBitmapPipeline.h b/src/core/SkLinearBitmapPipeline.h index 62d2201..237a165 100644 --- a/src/core/SkLinearBitmapPipeline.h +++ b/src/core/SkLinearBitmapPipeline.h @@ -34,14 +34,14 @@ public: SkShader::TileMode xTile, SkShader::TileMode yTile, SkColor paintColor, const SkPixmap& srcPixmap, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); SkLinearBitmapPipeline( const SkLinearBitmapPipeline& pipeline, const SkPixmap& srcPixmap, SkBlendMode, const SkImageInfo& dstInfo, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); static SkLinearBitmapPipeline* ClonePipelineForBlitting( const SkLinearBitmapPipeline& pipeline, @@ -51,7 +51,7 @@ public: float finalAlpha, SkBlendMode, const SkImageInfo& dstInfo, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); ~SkLinearBitmapPipeline(); @@ -65,23 +65,23 @@ 
public: class PixelAccessorInterface; using MatrixCloner = - std::function; + std::function; using TilerCloner = - std::function; + std::function; PointProcessorInterface* chooseMatrix( PointProcessorInterface* next, const SkMatrix& inverse, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); template PointProcessorInterface* createTiler(SampleProcessorInterface* next, SkISize dimensions, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); template PointProcessorInterface* chooseTilerYMode( SampleProcessorInterface* next, SkShader::TileMode yMode, SkISize dimensions, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); PointProcessorInterface* chooseTiler( SampleProcessorInterface* next, @@ -89,16 +89,16 @@ public: SkShader::TileMode xMode, SkShader::TileMode yMode, SkFilterQuality filterQuality, SkScalar dx, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); template PixelAccessorInterface* chooseSpecificAccessor(const SkPixmap& srcPixmap, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); PixelAccessorInterface* choosePixelAccessor( const SkPixmap& srcPixmap, const SkColor A8TintColor, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); SampleProcessorInterface* chooseSampler( BlendProcessorInterface* next, @@ -106,12 +106,12 @@ public: SkShader::TileMode xTile, SkShader::TileMode yTile, const SkPixmap& srcPixmap, const SkColor A8TintColor, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); BlendProcessorInterface* chooseBlenderForShading( SkAlphaType alphaType, float postAlpha, - SkFallbackAlloc* allocator); + SkArenaAlloc* allocator); PointProcessorInterface* fFirstStage; MatrixCloner fMatrixStageCloner; diff --git a/tests/FixedAllocTest.cpp b/tests/FixedAllocTest.cpp index 0a00f00..75bc232 100644 --- a/tests/FixedAllocTest.cpp +++ b/tests/FixedAllocTest.cpp @@ -13,7 +13,8 @@ namespace { static int created, destroyed; struct Foo { - Foo(int X, float Y) : x(X), y(Y) { created++; } + Foo() : x(-2), y(-3.0f) { created++; } + Foo(int X, float Y) : x(X), y(Y) { created++; } ~Foo() { destroyed++; } int x; @@ -111,3 +112,91 @@ DEF_TEST(FallbackAlloc, r) { REPORTER_ASSERT(r, !in_buf(big)); REPORTER_ASSERT(r, !in_buf(smallB)); } + +struct WithDtor { + ~WithDtor() { } +}; + +DEF_TEST(ArenaAlloc, r) { + + { + created = 0; + destroyed = 0; + + SkArenaAlloc arena{nullptr, 0}; + REPORTER_ASSERT(r, *arena.make(3) == 3); + Foo* foo = arena.make(3, 4.0f); + REPORTER_ASSERT(r, foo->x == 3); + REPORTER_ASSERT(r, foo->y == 4.0f); + REPORTER_ASSERT(r, created == 1); + REPORTER_ASSERT(r, destroyed == 0); + arena.makeArrayDefault(10); + int* zeroed = arena.makeArray(10); + for (int i = 0; i < 10; i++) { + REPORTER_ASSERT(r, zeroed[i] == 0); + } + Foo* fooArray = arena.makeArrayDefault(10); + REPORTER_ASSERT(r, fooArray[3].x == -2); + REPORTER_ASSERT(r, fooArray[4].y == -3.0f); + REPORTER_ASSERT(r, created == 11); + REPORTER_ASSERT(r, destroyed == 0); + arena.make::type>(); + } + REPORTER_ASSERT(r, created == 11); + REPORTER_ASSERT(r, destroyed == 11); + + { + created = 0; + destroyed = 0; + char block[1024]; + SkArenaAlloc arena{block}; + + REPORTER_ASSERT(r, *arena.make(3) == 3); + Foo* foo = arena.make(3, 4.0f); + REPORTER_ASSERT(r, foo->x == 3); + REPORTER_ASSERT(r, foo->y == 4.0f); + REPORTER_ASSERT(r, created == 1); + REPORTER_ASSERT(r, destroyed == 0); + arena.makeArrayDefault(10); + int* zeroed = arena.makeArray(10); + for (int i = 0; i < 10; i++) { + REPORTER_ASSERT(r, zeroed[i] == 0); + } + Foo* fooArray = arena.makeArrayDefault(10); + 
+        REPORTER_ASSERT(r, fooArray[3].x == -2);
+        REPORTER_ASSERT(r, fooArray[4].y == -3.0f);
+        REPORTER_ASSERT(r, created == 11);
+        REPORTER_ASSERT(r, destroyed == 0);
+        arena.make::type>();
+    }
+    REPORTER_ASSERT(r, created == 11);
+    REPORTER_ASSERT(r, destroyed == 11);
+
+    {
+        created = 0;
+        destroyed = 0;
+        std::unique_ptr<char[]> block{new char[1024]};
+        SkArenaAlloc arena{block.get(), 1024};
+
+        REPORTER_ASSERT(r, *arena.make<int>(3) == 3);
+        Foo* foo = arena.make<Foo>(3, 4.0f);
+        REPORTER_ASSERT(r, foo->x == 3);
+        REPORTER_ASSERT(r, foo->y == 4.0f);
+        REPORTER_ASSERT(r, created == 1);
+        REPORTER_ASSERT(r, destroyed == 0);
+        arena.makeArrayDefault(10);
+        int* zeroed = arena.makeArray<int>(10);
+        for (int i = 0; i < 10; i++) {
+            REPORTER_ASSERT(r, zeroed[i] == 0);
+        }
+        Foo* fooArray = arena.makeArrayDefault<Foo>(10);
+        REPORTER_ASSERT(r, fooArray[3].x == -2);
+        REPORTER_ASSERT(r, fooArray[4].y == -3.0f);
+        REPORTER_ASSERT(r, created == 11);
+        REPORTER_ASSERT(r, destroyed == 0);
+        arena.make::type>();
+    }
+    REPORTER_ASSERT(r, created == 11);
+    REPORTER_ASSERT(r, destroyed == 11);
+
+}
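For reference, here is a minimal usage sketch of the allocator this patch introduces, written against the SkArenaAlloc interface added to SkFixedAlloc.h above. The block size, the 512-byte extraSize, and the Widget type are illustrative only; they are not part of the patch.

    #include "SkFixedAlloc.h"  // declares SkArenaAlloc in this patch

    struct Widget {            // illustrative non-POD type; its destructor must run
        Widget(int id) : fId(id) {}
        ~Widget() { /* release resources */ }
        int fId;
    };

    void example() {
        // Most allocations land in this stack block; overflow goes to 512-byte heap blocks.
        char block[1024];
        SkArenaAlloc arena{block, sizeof(block), 512};

        int*    counts  = arena.makeArray<int>(16);          // value-initialized (zeroed)
        float*  scratch = arena.makeArrayDefault<float>(16);  // default-initialized
        Widget* w       = arena.make<Widget>(42);             // footer records ~Widget

        // ... use counts, scratch, and w; none of them are individually freed ...

        // All destructors run and any heap blocks are freed when the arena is reset or destroyed.
        arena.reset();
    }

Trivially destructible allocations only advance the cursor; the footer machinery in SkFixedAlloc.cpp comes into play solely for types with destructors and for crossing into a new heap block.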