2 * Copyright 2014 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "src/core/SkTBlockList.h"
9 #include "tests/Test.h"
// Test helper type: every constructor bumps the global live-instance counter
// gInstCnt so the tests below can assert exactly how many C objects exist.
// NOTE(review): the enclosing class declaration, the destructor, and the
// fID/gInstCnt members are elided from this view -- presumably the destructor
// decrements gInstCnt to balance these increments; confirm against the full file.
13 C() : fID(-1) { ++gInstCnt; }
14 C(int id) : fID(id) { ++gInstCnt; }
// Move and copy both delegate to C(int), so they also count as new instances.
15 C(C&& c) : C(c.fID) {}
16 C(const C& c) : C(c.fID) {}
// Assignment is defaulted: it copies fID but does not change the instance count.
18 C& operator=(C&&) = default;
19 C& operator=(const C&) = default;
25 // Under the hood, SkTBlockList and SkBlockAllocator round up to max_align_t. If 'C' was
26 // just 4 bytes, that often means the internal blocks can squeeze a few extra instances in. This
27 // is fine, but makes predicting a little trickier, so make sure C is a bit bigger.
// Test-only backdoor that reaches into SkTBlockList's private fAllocator so the
// tests can observe allocator internals (scratch block size, total byte size).
// NOTE(review): the `template<int N>` headers and access specifiers for these
// members are elided from this view.
40 class TBlockListTestAccess {
// Size in bytes of the reserved-but-unused scratch block, if any.
43 static size_t ScratchBlockSize(SkTBlockList<C, N>& list) {
44 return (size_t) list.fAllocator->scratchBlockSize();
// Total byte footprint of all blocks currently owned by the list's allocator.
48 static size_t TotalSize(SkTBlockList<C, N>& list) {
49 return list.fAllocator->totalSize();
// Re-exported so tests can compute alignment-dependent expectations.
52 static constexpr size_t kAddressAlign = SkBlockAllocator::kAddressAlign;
55 // Checks that the allocator has the correct count, etc and that the element IDs are correct.
56 // Then pops popCnt items and checks again.
// Verifies count/empty/instance-count invariants and that items are stored in
// insertion order (fID == index), both via iteration and random access. Then
// pops popCnt items and recursively re-checks with popCnt == 0.
58 static void check_allocator_helper(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
59 skiatest::Reporter* reporter) {
60 REPORTER_ASSERT(reporter, (0 == cnt) == allocator->empty());
61 REPORTER_ASSERT(reporter, cnt == allocator->count());
// Global instance counter must match: nothing leaked, nothing extra constructed.
62 REPORTER_ASSERT(reporter, cnt == C::gInstCnt);
// Iteration order must match insertion order, and item(i) must agree with it.
65 for (const C& c : allocator->items()) {
66 REPORTER_ASSERT(reporter, i == c.fID);
67 REPORTER_ASSERT(reporter, allocator->item(i).fID == i);
70 REPORTER_ASSERT(reporter, i == cnt);
// back() is the most recently pushed item (id cnt-1).
73 REPORTER_ASSERT(reporter, cnt-1 == allocator->back().fID);
// Pop popCnt items, then re-verify the shrunken list (popCnt == 0 terminates
// the recursion).
77 for (i = 0; i < popCnt; ++i) {
78 allocator->pop_back();
80 check_allocator_helper(allocator, cnt - popCnt, 0, reporter);
// Verifies that every traversal mode of SkTBlockList -- const/mutable forward
// iteration, const/mutable reverse iteration, and random access -- visits
// exactly the addresses recorded in 'expected', in the right order. Comparing
// addresses (not values) proves items are stable in memory.
85 static void check_iterator_helper(SkTBlockList<C, N>* allocator,
86 const std::vector<C*>& expected,
87 skiatest::Reporter* reporter) {
// Const alias so the const overloads of items()/ritems()/item() get exercised.
88 const SkTBlockList<C, N>* cAlloc = allocator;
89 REPORTER_ASSERT(reporter, (size_t) allocator->count() == expected.size());
// Const forward iteration.
92 for (const C& c : cAlloc->items()) {
93 REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
96 REPORTER_ASSERT(reporter, (size_t) i == expected.size());
// Mutable forward iteration.
100 for (C& c : allocator->items()) {
101 REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
104 REPORTER_ASSERT(reporter, (size_t) i == expected.size());
// Const reverse iteration: walk 'expected' from the back down to index 0.
107 i = (int) expected.size() - 1;
108 for (const C& c : cAlloc->ritems()) {
109 REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
112 REPORTER_ASSERT(reporter, i == -1);
// Mutable reverse iteration.
115 i = (int) expected.size() - 1;
116 for (C& c : allocator->ritems()) {
117 REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
120 REPORTER_ASSERT(reporter, i == -1);
122 // Also test random access
123 for (i = 0; i < allocator->count(); ++i) {
124 REPORTER_ASSERT(reporter, (uintptr_t) &allocator->item(i) == (uintptr_t) expected[i]);
125 REPORTER_ASSERT(reporter, (uintptr_t) &cAlloc->item(i) == (uintptr_t) expected[i]);
129 // Adds cnt items to the allocator, tests the cnts and iterators, pops popCnt items and checks
130 // again. Finally it resets the allocator and checks again.
132 static void check_allocator(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
133 skiatest::Reporter* reporter) {
// Cycles through every insertion API so each one gets coverage; kInitCount is
// the number of enumerators (kEmplace being the last).
// NOTE(review): the individual enumerator lines and case labels are elided
// from this view.
134 enum ItemInitializer : int {
141 static constexpr int kInitCount = (int) kEmplace + 1;
144 SkASSERT(allocator->empty());
// Record the address of each inserted item for the stability checks in
// check_iterator_helper().
145 std::vector<C*> items;
146 for (int i = 0; i < cnt; ++i) {
// i % kInitCount selects which insertion API to use for this item.
147 switch((ItemInitializer) (i % kInitCount)) {
// rvalue push_back.
149 allocator->push_back(C(i));
// std::move on a temporary is redundant, but deliberately exercises the
// explicit rvalue-reference path.
152 allocator->push_back(std::move(C(i)));
// Default-construct in place, then copy-assign.
155 allocator->push_back() = C(i);
// Default-construct in place, then move-assign.
158 allocator->push_back() = std::move(C(i));
// Direct in-place construction.
161 allocator->emplace_back(i);
164 items.push_back(&allocator->back());
// Verify full state, then pop popCnt items (done inside the helper).
166 check_iterator_helper(allocator, items, reporter);
167 check_allocator_helper(allocator, cnt, popCnt, reporter);
// After reset (elided above), the list must be completely empty again.
169 check_iterator_helper(allocator, {}, reporter);
170 check_allocator_helper(allocator, 0, 0, reporter);
// Exercises one allocator across a spread of (itemCount, popCount) pairs:
// empty, single item, pop-all, partial pops, and a count (100) large enough to
// span many internal blocks.
174 static void run_allocator_test(SkTBlockList<C, N>* allocator, skiatest::Reporter* reporter) {
175 check_allocator(allocator, 0, 0, reporter);
176 check_allocator(allocator, 1, 1, reporter);
177 check_allocator(allocator, 2, 2, reporter);
178 check_allocator(allocator, 10, 1, reporter);
179 check_allocator(allocator, 10, 5, reporter);
180 check_allocator(allocator, 10, 10, reporter);
181 check_allocator(allocator, 100, 10, reporter);
// Builds two lists with differing block sizes (N1, N2), fills them with
// consecutive ids (A holds 0..aCount-1, B holds aCount..aCount+bCount-1),
// concatenates B into A, and verifies counts, instance totals, and that the
// merged id sequence is continuous.
184 template<int N1, int N2>
185 static void run_concat_test(skiatest::Reporter* reporter, int aCount, int bCount) {
187 SkTBlockList<C, N1> listA;
188 SkTBlockList<C, N2> listB;
190 for (int i = 0; i < aCount; ++i) {
191 listA.emplace_back(i);
193 for (int i = 0; i < bCount; ++i) {
// B's ids continue where A's leave off, so the concatenation is 0..total-1.
194 listB.emplace_back(aCount + i);
197 REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);
198 REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);
200 // Concatenate B into A and verify.
201 listA.concat(std::move(listB));
202 REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
203 // SkTBlockList guarantees the moved list is empty, but clang-tidy doesn't know about it;
204 // in practice we won't really be using moved lists so this won't pollute our main code base
205 // with lots of warning disables.
206 REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move)
// Instance count unchanged: concat moves items, it does not duplicate them.
207 REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);
210 for (const C& item : listA.items()) {
211 // By construction of A and B originally, the concatenated id sequence is continuous
212 REPORTER_ASSERT(reporter, i == item.fID);
215 REPORTER_ASSERT(reporter, i == (aCount + bCount));
// Same concat scenario as run_concat_test(), but with the trivially-copyable
// type D, which takes SkTBlockList's trivial-copy fast path for concat.
218 template<int N1, int N2>
219 static void run_concat_trivial_test(skiatest::Reporter* reporter, int aCount, int bCount) {
// Guard: this test is only meaningful if D actually is trivially copyable.
220 static_assert(std::is_trivially_copyable<D>::value);
222 // This is similar to run_concat_test(), except since D is trivial we can't verify the instance
223 // counts that are tracked via ctor/dtor.
224 SkTBlockList<D, N1> listA;
225 SkTBlockList<D, N2> listB;
227 for (int i = 0; i < aCount; ++i) {
228 listA.push_back({i});
230 for (int i = 0; i < bCount; ++i) {
// B's ids continue A's so the merged sequence is 0..aCount+bCount-1.
231 listB.push_back({aCount + i});
234 REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);
235 // Concatenate B into A and verify.
236 listA.concat(std::move(listB));
237 REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
238 REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move): see above
241 for (const D& item : listA.items()) {
242 // By construction of A and B originally, the concatenated id sequence is continuous
243 REPORTER_ASSERT(reporter, i == item.fID);
246 REPORTER_ASSERT(reporter, i == (aCount + bCount));
// Verifies reserve() semantics: no growth while filling pre-sized capacity,
// reserve() not changing count, reserved space being fully consumed before any
// new allocation, reserve() accounting for space left in the active block, and
// reservations not constructing any items.
250 static void run_reserve_test(skiatest::Reporter* reporter) {
251 constexpr int kItemsPerBlock = N + 4; // Make this a number > 1, even if N starting items == 1
253 SkTBlockList<C, N> list(kItemsPerBlock);
254 size_t initialSize = TBlockListTestAccess::TotalSize(list);
255 // Should be able to add N instances of T w/o changing size from initialSize
256 for (int i = 0; i < N; ++i) {
257 list.push_back(C(i));
259 REPORTER_ASSERT(reporter, initialSize == TBlockListTestAccess::TotalSize(list));
261 // Reserve room for 2*kItemsPerBlock items
262 list.reserve(2 * kItemsPerBlock);
263 REPORTER_ASSERT(reporter, list.count() == N); // count shouldn't change though
265 size_t reservedSize = TBlockListTestAccess::TotalSize(list);
// The reservation must have grown the footprint by at least the bytes needed
// for the requested items.
266 REPORTER_ASSERT(reporter, reservedSize >= initialSize + 2 * kItemsPerBlock * sizeof(C));
// Filling the reserved space must not trigger any further allocation.
267 for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
268 list.push_back(C(i));
270 REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));
272 // Make the next block partially full (N > 0 but < kItemsPerBlock)
273 for (int i = 0; i < N; ++i) {
274 list.push_back(C(i));
277 // Reserve room again for 2*kItemsPerBlock, but reserve should automatically take account of the
278 // (kItemsPerBlock-N) that are still available in the active block
279 list.reserve(2 * kItemsPerBlock);
280 int extraReservedCount = kItemsPerBlock + N;
281 // Because SkTBlockList normally allocates blocks in fixed sizes, and extraReservedCount >
282 // items-per-block, it will always use that size and not that of the growth policy.
283 REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
284 extraReservedCount * sizeof(C));
// Again, consuming the reservation must not change the total footprint.
286 reservedSize = TBlockListTestAccess::TotalSize(list);
287 for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
288 list.push_back(C(i));
290 REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));
292 // If we reserve a count < items-per-block, it will use the fixed size from the growth policy.
294 REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
295 kItemsPerBlock * sizeof(C));
297 // Ensure the reservations didn't initialize any more C's than anticipated
298 int expectedInstanceCount = 2 * (N + 2 * kItemsPerBlock);
299 REPORTER_ASSERT(reporter, expectedInstanceCount == C::gInstCnt);
// After the list is destroyed (elided above), every C must have been torn down.
302 REPORTER_ASSERT(reporter, 0 == C::gInstCnt);
// Regression-style check: when a block's size in aligned units would overflow
// a uint16_t increment, the allocator must still size (and 4k-align) the
// scratch block correctly rather than truncate.
305 void run_large_increment_test(skiatest::Reporter* reporter) {
306 static constexpr size_t kIncrementMax = std::numeric_limits<uint16_t>::max();
307 // Pick an item count such that count*sizeof(C)/max align exceeds uint16_t max.
308 int itemCount = (int) (sizeof(C) * kIncrementMax / TBlockListTestAccess::kAddressAlign) + 1;
309 SkTBlockList<C> largeIncrement(itemCount);
310 // Trigger a scratch block allocation, which given default fixed growth policy, will be one
312 largeIncrement.reserve(10);
313 size_t scratchSize = TBlockListTestAccess::ScratchBlockSize(largeIncrement);
314 // SkBlockAllocator aligns large blocks to 4k
315 size_t expected = SkAlignTo(kIncrementMax * TBlockListTestAccess::kAddressAlign, (1 << 12));
316 REPORTER_ASSERT(reporter, scratchSize == expected);
// Entry point: runs the full suite over heap-block lists of several block
// sizes, stack-storage lists of several N, and the reserve/concat/large-
// increment scenarios.
319 DEF_TEST(SkTBlockList, reporter) {
320 // Test combinations of allocators with and without stack storage and with different block sizes
321 SkTBlockList<C> a1(1);
322 run_allocator_test(&a1, reporter);
324 SkTBlockList<C> a2(2);
325 run_allocator_test(&a2, reporter);
327 SkTBlockList<C> a5(5);
328 run_allocator_test(&a5, reporter);
// Lists with inline (stack) storage for the first N items.
330 SkTBlockList<C, 1> sa1;
331 run_allocator_test(&sa1, reporter);
333 SkTBlockList<C, 3> sa3;
334 run_allocator_test(&sa3, reporter);
336 SkTBlockList<C, 4> sa4;
337 run_allocator_test(&sa4, reporter);
339 run_reserve_test<1>(reporter);
340 run_reserve_test<2>(reporter);
341 run_reserve_test<3>(reporter);
342 run_reserve_test<4>(reporter);
343 run_reserve_test<5>(reporter);
// Concat across every combination of small/large source and dest block sizes.
345 run_concat_test<1, 1>(reporter, 10, 10);
346 run_concat_test<5, 1>(reporter, 50, 10);
347 run_concat_test<1, 5>(reporter, 10, 50);
348 run_concat_test<5, 5>(reporter, 100, 100);
350 run_concat_trivial_test<1, 1>(reporter, 10, 10);
351 run_concat_trivial_test<5, 1>(reporter, 50, 10);
352 run_concat_trivial_test<1, 5>(reporter, 10, 50);
353 run_concat_trivial_test<5, 5>(reporter, 100, 100);
355 run_large_increment_test(reporter);