#define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
/**
- * Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
- * more), then the allocation will come from the stack rather than the heap. This object is the
- * sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
- * the block.
+ * Manage an allocated block of memory. If the requested size is <= kSize, then
+ * the allocation will come from the stack rather than the heap. This object
+ * is the sole manager of the lifetime of the block, so the caller must not
+ * call sk_free() or delete on the block.
*/
-template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
+template <size_t kSize> class SkAutoSMalloc : SkNoncopyable {
public:
/**
- * Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
- * Must call reset(size) to return an allocated block.
+ * Creates initially empty storage. get() returns a ptr, but it is to
+ * a zero-byte allocation. Must call reset(size) to return an allocated
+ * block.
*/
SkAutoSMalloc() {
fPtr = fStorage;
}
/**
- * Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
- * the allocation will come from the stack, otherwise it will be dynamically allocated.
+ * Allocate a block of the specified size. If size <= kSize, then the
+ * allocation will come from the stack, otherwise it will be dynamically
+ * allocated.
*/
explicit SkAutoSMalloc(size_t size) {
fPtr = fStorage;
}
/**
- * Free the allocated block (if any). If the block was small enough to have been allocated on
- * the stack, then this does nothing.
+ * Free the allocated block (if any). If the block was small enough to
+ * have been allocated on the stack (size <= kSize) then this does nothing.
*/
~SkAutoSMalloc() {
if (fPtr != (void*)fStorage) {
}
/**
- * Return the allocated block. May return non-null even if the block is of zero size. Since
- * this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
- * but must rely on SkAutoSMalloc to manage it.
+ * Return the allocated block. May return non-null even if the block is
+ * of zero size. Since this may be on the stack or dynamically allocated,
+ * the caller must not call sk_free() on it, but must rely on SkAutoSMalloc
+ * to manage it.
*/
void* get() const { return fPtr; }
/**
- * Return a new block of the requested size, freeing (as necessary) any previously allocated
- * block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
- * block may be allocated locally, rather than from the heap.
+ * Return a new block of the requested size, freeing (as necessary) any
+ * previously allocated block. As with the constructor, if size <= kSize
+ * then the return block may be allocated locally, rather than from the
+ * heap.
*/
void* reset(size_t size,
SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
}
private:
- // Align up to 32 bits.
- static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
-#if defined(GOOGLE3)
- // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
- // have multiple large stack allocations.
- static const size_t kMaxBytes = 4 * 1024;
- static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
-#else
- static const size_t kSize = kSizeAlign4;
-#endif
-
void* fPtr;
size_t fSize; // can be larger than the requested size (see kReuse)
- uint32_t fStorage[kSize >> 2];
+ uint32_t fStorage[(kSize + 3) >> 2];
};
// Can't guard the constructor because it's a template class.
SkDEBUGCODE(int fCount;)
};
-/** Wraps SkAutoTArray, with room for kCountRequested elements preallocated.
+/** Wraps SkAutoTArray, with room for up to N elements preallocated.
*/
-template <int kCountRequested, typename T> class SkAutoSTArray : SkNoncopyable {
+template <int N, typename T> class SkAutoSTArray : SkNoncopyable {
public:
/** Initialize with no objects */
SkAutoSTArray() {
}
if (fCount != count) {
- if (fCount > kCount) {
+ if (fCount > N) {
// 'fArray' was allocated last time so free it now
SkASSERT((T*) fStorage != fArray);
sk_free(fArray);
}
- if (count > kCount) {
+ if (count > N) {
const uint64_t size64 = sk_64_mul(count, sizeof(T));
const size_t size = static_cast<size_t>(size64);
if (size != size64) {
}
private:
-#if defined(GOOGLE3)
- // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
- // have multiple large stack allocations.
- static const int kMaxBytes = 4 * 1024;
- static const int kCount = kCountRequested * sizeof(T) > kMaxBytes
- ? kMaxBytes / sizeof(T)
- : kCountRequested;
-#else
- static const int kCount = kCountRequested;
-#endif
-
int fCount;
T* fArray;
// since we come right after fArray, fStorage should be properly aligned
- char fStorage[kCount * sizeof(T)];
+ char fStorage[N * sizeof(T)];
};
/** Manages an array of T elements, freeing the array in the destructor.
T* fPtr;
};
-template <size_t kCountRequested, typename T> class SkAutoSTMalloc : SkNoncopyable {
+template <size_t N, typename T> class SkAutoSTMalloc : SkNoncopyable {
public:
SkAutoSTMalloc() : fPtr(fTStorage) {}
SkAutoSTMalloc(size_t count) {
- if (count > kCount) {
+ if (count > N) {
fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP);
} else {
fPtr = fTStorage;
if (fPtr != fTStorage) {
sk_free(fPtr);
}
- if (count > kCount) {
+ if (count > N) {
fPtr = (T*)sk_malloc_throw(count * sizeof(T));
} else {
fPtr = fTStorage;
// Reallocs the array, can be used to shrink the allocation. Makes no attempt to be intelligent
void realloc(size_t count) {
- if (count > kCount) {
+ if (count > N) {
if (fPtr == fTStorage) {
fPtr = (T*)sk_malloc_throw(count * sizeof(T));
- memcpy(fPtr, fTStorage, kCount * sizeof(T));
+ memcpy(fPtr, fTStorage, N * sizeof(T));
} else {
fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
}
}
private:
- // Since we use uint32_t storage, we might be able to get more elements for free.
- static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T);
-#if defined(GOOGLE3)
- // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
- // have multiple large stack allocations.
- static const size_t kMaxBytes = 4 * 1024;
- static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes
- ? kMaxBytes / sizeof(T)
- : kCountWithPadding;
-#else
- static const size_t kCount = kCountWithPadding;
-#endif
-
T* fPtr;
union {
- uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2];
+ uint32_t fStorage32[(N*sizeof(T) + 3) >> 2];
T fTStorage[1]; // do NOT want to invoke T::T()
};
};
}
// temp buffer for doing sw premul conversion, if needed.
+#if defined(GOOGLE3)
+ // Stack frame size is limited in GOOGLE3.
+ SkAutoSTMalloc<48 * 48, uint32_t> tmpPixels(0);
+#else
SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
+#endif
if (tempTexture) {
SkAutoTUnref<const GrFragmentProcessor> fp;
SkMatrix textureMatrix;
instances.count());
} else {
int floatsPerTransform = GrPathRendering::PathTransformSize(this->transformType());
+#if defined(GOOGLE3)
+    // Stack frame size is limited in GOOGLE3.
+ SkAutoSTMalloc<512, float> transformStorage(floatsPerTransform * fTotalPathCount);
+ SkAutoSTMalloc<256, uint16_t> indexStorage(fTotalPathCount);
+#else
SkAutoSTMalloc<4096, float> transformStorage(floatsPerTransform * fTotalPathCount);
SkAutoSTMalloc<2048, uint16_t> indexStorage(fTotalPathCount);
+#endif
int idx = 0;
for (DrawList::Iter iter(fDraws); iter.get(); iter.next()) {
const Draw& draw = *iter.get();
size_t trimRowBytes = width * bpp;
// in case we need a temporary, trimmed copy of the src pixels
+#if defined(GOOGLE3)
+ // Stack frame size is limited in GOOGLE3.
+ SkAutoSMalloc<64 * 128> tempStorage;
+#else
SkAutoSMalloc<128 * 128> tempStorage;
+#endif
// Internal format comes from the texture desc.
GrGLenum internalFormat;