/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "wtf/PartitionAlloc.h"

#include "wtf/BitwiseOperations.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include <gtest/gtest.h>
#include <stdlib.h>
#include <string.h>

// MAP_ANONYMOUS is the Linux/POSIX spelling; some BSD-derived platforms
// (e.g. Mac OS X) only define MAP_ANON. Guard the alias so we never
// redefine a platform-provided macro.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
49 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
53 static const size_t kTestMaxAllocation = 4096;
54 static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
55 static PartitionAllocatorGeneric genericAllocator;
57 static const size_t kTestAllocSize = 16;
59 static const size_t kPointerOffset = 0;
60 static const size_t kExtraAllocSize = 0;
62 static const size_t kPointerOffset = WTF::kCookieSize;
63 static const size_t kExtraAllocSize = WTF::kCookieSize * 2;
65 static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
66 static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
68 static void TestSetup()
71 genericAllocator.init();
74 static void TestShutdown()
76 // We expect no leaks in the general case. We have a test for leak
78 EXPECT_TRUE(allocator.shutdown());
79 EXPECT_TRUE(genericAllocator.shutdown());
82 static WTF::PartitionPage* GetFullPage(size_t size)
84 size_t realSize = size + kExtraAllocSize;
85 size_t bucketIdx = realSize >> WTF::kBucketShift;
86 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
87 size_t numSlots = (bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / realSize;
91 for (i = 0; i < numSlots; ++i) {
92 void* ptr = partitionAlloc(allocator.root(), size);
95 first = WTF::partitionCookieFreePointerAdjust(ptr);
96 else if (i == numSlots - 1)
97 last = WTF::partitionCookieFreePointerAdjust(ptr);
99 EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last));
100 if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage)
101 EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
102 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
103 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
104 EXPECT_TRUE(bucket->activePagesHead);
105 EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage);
106 return bucket->activePagesHead;
109 static void FreeFullPage(WTF::PartitionPage* page)
111 size_t size = page->bucket->slotSize;
112 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size;
113 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
114 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
116 for (i = 0; i < numSlots; ++i) {
117 partitionFree(ptr + kPointerOffset);
122 static void CycleFreeCache(size_t size)
124 size_t realSize = size + kExtraAllocSize;
125 size_t bucketIdx = realSize >> WTF::kBucketShift;
126 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
127 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
129 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
130 void* ptr = partitionAlloc(allocator.root(), size);
131 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
133 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
134 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
138 static void CycleGenericFreeCache(size_t size)
140 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
141 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
142 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
143 WTF::PartitionBucket* bucket = page->bucket;
144 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
145 partitionFreeGeneric(genericAllocator.root(), ptr);
146 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
147 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
151 // Check that the most basic of allocate / free pairs work.
152 TEST(PartitionAllocTest, Basic)
155 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
156 WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage;
158 EXPECT_FALSE(bucket->freePagesHead);
159 EXPECT_EQ(seedPage, bucket->activePagesHead);
160 EXPECT_EQ(0, bucket->activePagesHead->nextPage);
162 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
164 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
165 // Check that the offset appears to include a guard page.
166 EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);
169 // Expect that the last active page does not get tossed to the freelist.
170 EXPECT_FALSE(bucket->freePagesHead);
175 // Check that we can detect a memory leak.
176 TEST(PartitionAllocTest, SimpleLeak)
179 void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
181 void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
183 EXPECT_FALSE(allocator.shutdown());
184 EXPECT_FALSE(genericAllocator.shutdown());
187 // Test multiple allocations, and freelist handling.
188 TEST(PartitionAllocTest, MultiAlloc)
192 char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
193 char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
196 ptrdiff_t diff = ptr2 - ptr1;
197 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
199 // Check that we re-use the just-freed slot.
201 ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
204 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
206 ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
209 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
211 char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
214 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
223 // Test a bucket with multiple pages.
224 TEST(PartitionAllocTest, MultiPages)
227 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
229 WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
231 EXPECT_FALSE(bucket->freePagesHead);
232 EXPECT_EQ(page, bucket->activePagesHead);
233 EXPECT_EQ(0, page->nextPage);
234 EXPECT_EQ(0, page->numAllocatedSlots);
236 page = GetFullPage(kTestAllocSize);
237 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
239 EXPECT_EQ(page2, bucket->activePagesHead);
240 EXPECT_EQ(0, page2->nextPage);
241 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);
243 // Fully free the non-current page. It should not be freelisted because
244 // there is no other immediately useable page. The other page is full.
246 EXPECT_EQ(0, page->numAllocatedSlots);
247 EXPECT_FALSE(bucket->freePagesHead);
248 EXPECT_EQ(page, bucket->activePagesHead);
250 // Allocate a new page, it should pull from the freelist.
251 page = GetFullPage(kTestAllocSize);
252 EXPECT_FALSE(bucket->freePagesHead);
253 EXPECT_EQ(page, bucket->activePagesHead);
257 EXPECT_EQ(0, page->numAllocatedSlots);
258 EXPECT_EQ(0, page2->numAllocatedSlots);
259 EXPECT_EQ(0, page2->numUnprovisionedSlots);
260 EXPECT_NE(-1, page2->freeCacheIndex);
265 // Test some finer aspects of internal page transitions.
266 TEST(PartitionAllocTest, PageTransitions)
269 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
271 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
272 EXPECT_EQ(page1, bucket->activePagesHead);
273 EXPECT_EQ(0, page1->nextPage);
274 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
275 EXPECT_EQ(page2, bucket->activePagesHead);
276 EXPECT_EQ(0, page2->nextPage);
278 // Bounce page1 back into the non-full list then fill it up again.
279 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
281 EXPECT_EQ(page1, bucket->activePagesHead);
282 (void) partitionAlloc(allocator.root(), kTestAllocSize);
283 EXPECT_EQ(page1, bucket->activePagesHead);
284 EXPECT_EQ(page2, bucket->activePagesHead->nextPage);
286 // Allocating another page at this point should cause us to scan over page1
287 // (which is both full and NOT our current page), and evict it from the
288 // freelist. Older code had a O(n^2) condition due to failure to do this.
289 WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
290 EXPECT_EQ(page3, bucket->activePagesHead);
291 EXPECT_EQ(0, page3->nextPage);
293 // Work out a pointer into page2 and free it.
294 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
296 // Trying to allocate at this time should cause us to cycle around to page2
297 // and find the recently freed slot.
298 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
299 EXPECT_EQ(ptr, newPtr);
300 EXPECT_EQ(page2, bucket->activePagesHead);
301 EXPECT_EQ(page3, page2->nextPage);
303 // Work out a pointer into page1 and free it. This should pull the page
304 // back into the list of available pages.
305 ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
307 // This allocation should be satisfied by page1.
308 newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
309 EXPECT_EQ(ptr, newPtr);
310 EXPECT_EQ(page1, bucket->activePagesHead);
311 EXPECT_EQ(page2, page1->nextPage);
317 // Allocating whilst in this state exposed a bug, so keep the test.
318 ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
324 // Test some corner cases relating to page transitions in the internal
325 // free page list metadata bucket.
326 TEST(PartitionAllocTest, FreePageListPageTransitions)
329 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
331 size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
332 // The +1 is because we need to account for the fact that the current page
333 // never gets thrown on the freelist.
334 ++numToFillFreeListPage;
335 OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);
338 for (i = 0; i < numToFillFreeListPage; ++i) {
339 pages[i] = GetFullPage(kTestAllocSize);
341 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
342 for (i = 0; i < numToFillFreeListPage; ++i)
343 FreeFullPage(pages[i]);
344 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
345 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
346 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
347 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
349 // Allocate / free in a different bucket size so we get control of a
350 // different free page list. We need two pages because one will be the last
351 // active page and not get freed.
352 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
353 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
357 // If we re-allocate all kTestAllocSize allocations, we'll pull all the
358 // free pages and end up freeing the first page for free page objects.
359 // It's getting a bit tricky but a nice re-entrancy is going on:
360 // alloc(kTestAllocSize) -> pulls page from free page list ->
361 // free(PartitionFreepagelistEntry) -> last entry in page freed ->
362 // alloc(PartitionFreepagelistEntry).
363 for (i = 0; i < numToFillFreeListPage; ++i) {
364 pages[i] = GetFullPage(kTestAllocSize);
366 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
368 // As part of the final free-up, we'll test another re-entrancy:
369 // free(kTestAllocSize) -> last entry in page freed ->
370 // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
371 // free(PartitionFreepagelistEntry)
372 for (i = 0; i < numToFillFreeListPage; ++i)
373 FreeFullPage(pages[i]);
374 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
375 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
376 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
377 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
382 // Test a large series of allocations that cross more than one underlying
383 // 64KB super page allocation.
384 TEST(PartitionAllocTest, MultiPageAllocs)
387 // This is guaranteed to cross a super page boundary because the first
388 // partition page "slot" will be taken up by a guard page.
389 size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
390 // The super page should begin and end in a guard so we one less page in
391 // order to allocate a single page in the new super page.
394 EXPECT_GT(numPagesNeeded, 1u);
395 OwnPtr<WTF::PartitionPage*[]> pages;
396 pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
397 uintptr_t firstSuperPageBase = 0;
399 for (i = 0; i < numPagesNeeded; ++i) {
400 pages[i] = GetFullPage(kTestAllocSize);
401 void* storagePtr = partitionPageToPointer(pages[i]);
403 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
404 if (i == numPagesNeeded - 1) {
405 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
406 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
407 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
408 // Check that we allocated a guard page for the second page.
409 EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
412 for (i = 0; i < numPagesNeeded; ++i)
413 FreeFullPage(pages[i]);
418 // Test the generic allocation functions that can handle arbitrary sizes and
420 TEST(PartitionAllocTest, GenericAlloc)
424 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
426 partitionFreeGeneric(genericAllocator.root(), ptr);
427 ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1);
429 partitionFreeGeneric(genericAllocator.root(), ptr);
431 ptr = partitionAllocGeneric(genericAllocator.root(), 1);
434 char* charPtr = static_cast<char*>(ptr);
437 // Change the size of the realloc, remaining inside the same bucket.
438 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
439 EXPECT_EQ(ptr, newPtr);
440 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
441 EXPECT_EQ(ptr, newPtr);
442 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket);
443 EXPECT_EQ(ptr, newPtr);
445 // Change the size of the realloc, switching buckets.
446 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket + 1);
447 EXPECT_NE(newPtr, ptr);
448 // Check that the realloc copied correctly.
449 char* newCharPtr = static_cast<char*>(newPtr);
450 EXPECT_EQ(*newCharPtr, 'A');
452 // Subtle: this checks for an old bug where we copied too much from the
453 // source of the realloc. The condition can be detected by a trashing of
454 // the uninitialized value in the space of the upsized allocation.
455 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket)));
458 // The realloc moved. To check that the old allocation was freed, we can
459 // do an alloc of the old allocation size and check that the old allocation
460 // address is at the head of the freelist and reused.
461 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
462 EXPECT_EQ(reusedPtr, origPtr);
463 partitionFreeGeneric(genericAllocator.root(), reusedPtr);
465 // Downsize the realloc.
467 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
468 EXPECT_EQ(newPtr, origPtr);
469 newCharPtr = static_cast<char*>(newPtr);
470 EXPECT_EQ(*newCharPtr, 'B');
473 // Upsize the realloc to outside the partition.
475 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 1);
476 EXPECT_NE(newPtr, ptr);
477 newCharPtr = static_cast<char*>(newPtr);
478 EXPECT_EQ(*newCharPtr, 'C');
481 // Upsize and downsize the realloc, remaining outside the partition.
483 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 10);
484 newCharPtr = static_cast<char*>(newPtr);
485 EXPECT_EQ(*newCharPtr, 'D');
488 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 2);
489 newCharPtr = static_cast<char*>(newPtr);
490 EXPECT_EQ(*newCharPtr, 'E');
493 // Downsize the realloc to inside the partition.
495 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
496 EXPECT_NE(newPtr, ptr);
497 EXPECT_EQ(newPtr, origPtr);
498 newCharPtr = static_cast<char*>(newPtr);
499 EXPECT_EQ(*newCharPtr, 'F');
501 partitionFreeGeneric(genericAllocator.root(), newPtr);
505 // Test the generic allocation functions can handle some specific sizes of
507 TEST(PartitionAllocTest, GenericAllocSizes)
511 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
513 partitionFreeGeneric(genericAllocator.root(), ptr);
515 // kPartitionPageSize is interesting because it results in just one
516 // allocation per page, which tripped up some corner cases.
517 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
518 ptr = partitionAllocGeneric(genericAllocator.root(), size);
520 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
522 partitionFreeGeneric(genericAllocator.root(), ptr);
523 // Should be freeable at this point.
524 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
525 EXPECT_NE(-1, page->freeCacheIndex);
526 partitionFreeGeneric(genericAllocator.root(), ptr2);
528 size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WTF::kSystemPageSize) / 2) - kExtraAllocSize;
529 ptr = partitionAllocGeneric(genericAllocator.root(), size);
531 memset(ptr, 'A', size);
532 ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
534 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
536 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
539 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
540 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr3));
541 EXPECT_NE(page, page2);
543 partitionFreeGeneric(genericAllocator.root(), ptr);
544 partitionFreeGeneric(genericAllocator.root(), ptr3);
545 partitionFreeGeneric(genericAllocator.root(), ptr2);
546 // Should be freeable at this point.
547 EXPECT_NE(-1, page->freeCacheIndex);
548 EXPECT_EQ(0, page->numAllocatedSlots);
549 EXPECT_EQ(0, page->numUnprovisionedSlots);
550 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
551 EXPECT_EQ(ptr3, newPtr);
552 newPtr = partitionAllocGeneric(genericAllocator.root(), size);
553 EXPECT_EQ(ptr2, newPtr);
554 #if OS(LINUX) && defined(NDEBUG)
555 // On Linux, we have a guarantee that freelisting a page should cause its
556 // contents to be nulled out. We check for null here to detect an bug we
557 // had where a large slot size was causing us to not properly free all
558 // resources back to the system.
559 // We only run the check in optimized builds because the debug build
560 // writes over the allocated area with an "uninitialized" byte pattern.
561 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
563 partitionFreeGeneric(genericAllocator.root(), newPtr);
564 partitionFreeGeneric(genericAllocator.root(), ptr3);
565 partitionFreeGeneric(genericAllocator.root(), ptr4);
567 // Can we allocate a massive (512MB) size?
568 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
569 partitionFreeGeneric(genericAllocator.root(), ptr);
571 // Check a more reasonable, but still direct mapped, size.
572 // Chop a system page and a byte off to test for rounding errors.
573 size = 20 * 1024 * 1024;
574 size -= WTF::kSystemPageSize;
576 ptr = partitionAllocGeneric(genericAllocator.root(), size);
577 char* charPtr = reinterpret_cast<char*>(ptr);
578 *(charPtr + (size - 1)) = 'A';
579 partitionFreeGeneric(genericAllocator.root(), ptr);
582 partitionFreeGeneric(genericAllocator.root(), 0);
584 // Do we correctly get a null for a failed allocation?
585 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));
590 // Test that we can fetch the real allocated size after an allocation.
591 TEST(PartitionAllocTest, GenericAllocGetSize)
596 size_t requestedSize, actualSize, predictedSize;
598 EXPECT_TRUE(partitionAllocSupportsGetSize());
600 // Allocate something small.
601 requestedSize = 511 - kExtraAllocSize;
602 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
603 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
605 actualSize = partitionAllocGetSize(ptr);
606 EXPECT_EQ(predictedSize, actualSize);
607 EXPECT_LT(requestedSize, actualSize);
608 partitionFreeGeneric(genericAllocator.root(), ptr);
610 // Allocate a size that should be a perfect match for a bucket, because it
611 // is an exact power of 2.
612 requestedSize = (256 * 1024) - kExtraAllocSize;
613 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
614 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
616 actualSize = partitionAllocGetSize(ptr);
617 EXPECT_EQ(predictedSize, actualSize);
618 EXPECT_EQ(requestedSize, actualSize);
619 partitionFreeGeneric(genericAllocator.root(), ptr);
621 // Allocate a size that is a system page smaller than a bucket. GetSize()
622 // should return a larger size than we asked for now.
623 requestedSize = (256 * 1024) - WTF::kSystemPageSize - kExtraAllocSize;
624 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
625 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
627 actualSize = partitionAllocGetSize(ptr);
628 EXPECT_EQ(predictedSize, actualSize);
629 EXPECT_EQ(requestedSize + WTF::kSystemPageSize, actualSize);
630 // Check that we can write at the end of the reported size too.
631 char* charPtr = reinterpret_cast<char*>(ptr);
632 *(charPtr + (actualSize - 1)) = 'A';
633 partitionFreeGeneric(genericAllocator.root(), ptr);
635 // Allocate something very large, and uneven.
636 requestedSize = 512 * 1024 * 1024 - 1;
637 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
638 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
640 actualSize = partitionAllocGetSize(ptr);
641 EXPECT_EQ(predictedSize, actualSize);
642 EXPECT_LT(requestedSize, actualSize);
643 partitionFreeGeneric(genericAllocator.root(), ptr);
645 // Too large allocation.
646 requestedSize = INT_MAX;
647 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
648 EXPECT_EQ(requestedSize, predictedSize);
653 // Test the realloc() contract.
654 TEST(PartitionAllocTest, Realloc)
658 // realloc(0, size) should be equivalent to malloc().
659 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
660 memset(ptr, 'A', kTestAllocSize);
661 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
662 // realloc(ptr, 0) should be equivalent to free().
663 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
665 EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead);
667 // Test that growing an allocation with realloc() copies everything from the
669 size_t size = WTF::kSystemPageSize - kExtraAllocSize;
670 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
671 ptr = partitionAllocGeneric(genericAllocator.root(), size);
672 memset(ptr, 'A', size);
673 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
674 EXPECT_NE(ptr, ptr2);
675 char* charPtr2 = static_cast<char*>(ptr2);
676 EXPECT_EQ('A', charPtr2[0]);
677 EXPECT_EQ('A', charPtr2[size - 1]);
679 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
682 // Test that shrinking an allocation with realloc() also copies everything
683 // from the old allocation.
684 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
685 EXPECT_NE(ptr2, ptr);
686 char* charPtr = static_cast<char*>(ptr);
687 EXPECT_EQ('A', charPtr[0]);
688 EXPECT_EQ('A', charPtr[size - 2]);
690 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
693 partitionFreeGeneric(genericAllocator.root(), ptr);
695 // Test that shrinking a direct mapped allocation happens in-place.
696 size = WTF::kGenericMaxBucketed + 16 * WTF::kSystemPageSize;
697 ptr = partitionAllocGeneric(genericAllocator.root(), size);
698 size_t actualSize = partitionAllocGetSize(ptr);
699 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 8 * WTF::kSystemPageSize);
700 EXPECT_EQ(ptr, ptr2);
701 EXPECT_EQ(actualSize - 8 * WTF::kSystemPageSize, partitionAllocGetSize(ptr2));
703 // Test that a previously in-place shrunk direct mapped allocation can be
704 // expanded up again within its original size.
705 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - WTF::kSystemPageSize);
706 EXPECT_EQ(ptr2, ptr);
707 EXPECT_EQ(actualSize - WTF::kSystemPageSize, partitionAllocGetSize(ptr));
709 // Test that a direct mapped allocation is performed not in-place when the
710 // new size is small enough.
711 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kSystemPageSize);
712 EXPECT_NE(ptr, ptr2);
714 partitionFreeGeneric(genericAllocator.root(), ptr2);
719 // Tests the handing out of freelists for partial pages.
720 TEST(PartitionAllocTest, PartialPageFreelists)
724 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
725 EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
726 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
727 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
728 EXPECT_EQ(0, bucket->freePagesHead);
730 void* ptr = partitionAlloc(allocator.root(), bigSize);
733 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
734 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (bigSize + kExtraAllocSize);
735 EXPECT_EQ(4u, totalSlots);
736 // The freelist should have one entry, because we were able to exactly fit
737 // one object slot and one freelist pointer (the null that the head points
738 // to) into a system page.
739 EXPECT_TRUE(page->freelistHead);
740 EXPECT_EQ(1, page->numAllocatedSlots);
741 EXPECT_EQ(2, page->numUnprovisionedSlots);
743 void* ptr2 = partitionAlloc(allocator.root(), bigSize);
745 EXPECT_FALSE(page->freelistHead);
746 EXPECT_EQ(2, page->numAllocatedSlots);
747 EXPECT_EQ(2, page->numUnprovisionedSlots);
749 void* ptr3 = partitionAlloc(allocator.root(), bigSize);
751 EXPECT_TRUE(page->freelistHead);
752 EXPECT_EQ(3, page->numAllocatedSlots);
753 EXPECT_EQ(0, page->numUnprovisionedSlots);
755 void* ptr4 = partitionAlloc(allocator.root(), bigSize);
757 EXPECT_FALSE(page->freelistHead);
758 EXPECT_EQ(4, page->numAllocatedSlots);
759 EXPECT_EQ(0, page->numUnprovisionedSlots);
761 void* ptr5 = partitionAlloc(allocator.root(), bigSize);
764 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
765 EXPECT_EQ(1, page2->numAllocatedSlots);
767 // Churn things a little whilst there's a partial page freelist.
769 ptr = partitionAlloc(allocator.root(), bigSize);
770 void* ptr6 = partitionAlloc(allocator.root(), bigSize);
778 EXPECT_NE(-1, page->freeCacheIndex);
779 EXPECT_NE(-1, page2->freeCacheIndex);
780 EXPECT_TRUE(page2->freelistHead);
781 EXPECT_EQ(0, page2->numAllocatedSlots);
783 // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
784 size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize;
785 bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
786 bucket = &allocator.root()->buckets()[bucketIdx];
787 EXPECT_EQ(0, bucket->freePagesHead);
789 ptr = partitionAlloc(allocator.root(), mediumSize);
791 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
792 EXPECT_EQ(1, page->numAllocatedSlots);
793 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (mediumSize + kExtraAllocSize);
794 size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
795 EXPECT_EQ(2u, firstPageSlots);
796 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
800 size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize;
801 bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
802 bucket = &allocator.root()->buckets()[bucketIdx];
803 EXPECT_EQ(0, bucket->freePagesHead);
805 ptr = partitionAlloc(allocator.root(), smallSize);
807 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
808 EXPECT_EQ(1, page->numAllocatedSlots);
809 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (smallSize + kExtraAllocSize);
810 firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
811 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
814 EXPECT_TRUE(page->freelistHead);
815 EXPECT_EQ(0, page->numAllocatedSlots);
817 size_t verySmallSize = 32 - kExtraAllocSize;
818 bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
819 bucket = &allocator.root()->buckets()[bucketIdx];
820 EXPECT_EQ(0, bucket->freePagesHead);
822 ptr = partitionAlloc(allocator.root(), verySmallSize);
824 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
825 EXPECT_EQ(1, page->numAllocatedSlots);
826 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (verySmallSize + kExtraAllocSize);
827 firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
828 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
831 EXPECT_TRUE(page->freelistHead);
832 EXPECT_EQ(0, page->numAllocatedSlots);
834 // And try an allocation size (against the generic allocator) that is
835 // larger than a system page.
836 size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2)) - kExtraAllocSize;
837 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
839 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
840 EXPECT_EQ(1, page->numAllocatedSlots);
841 EXPECT_TRUE(page->freelistHead);
842 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
843 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
844 partitionFreeGeneric(genericAllocator.root(), ptr);
846 // And then make sure than exactly the page size only faults one page.
847 size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize;
848 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
850 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
851 EXPECT_EQ(1, page->numAllocatedSlots);
852 EXPECT_FALSE(page->freelistHead);
853 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageSize + kExtraAllocSize);
854 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
855 partitionFreeGeneric(genericAllocator.root(), ptr);
860 // Test some of the fragmentation-resistant properties of the allocator.
861 TEST(PartitionAllocTest, PageRefilling)
// Verifies that after two full pages each have one slot freed, two fresh
// allocations from the same bucket are satisfied from those nearly-full
// pages (refilling them) rather than from the current active page.
// NOTE(review): this chunk omits some interior lines of the test (the
// opening brace, the partitionFree calls around original lines 878/880, and
// teardown) — comments only were added here; code is unchanged.
864 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
866 // Grab two full pages and a non-full page.
867 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
868 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
869 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
// The two full pages must no longer be the bucket's active page.
871 EXPECT_NE(page1, bucket->activePagesHead);
872 EXPECT_NE(page2, bucket->activePagesHead);
873 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
874 EXPECT_EQ(1, page->numAllocatedSlots);
876 // Work out a pointer into page2 and free it; and then page1 and free it.
877 char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
879 ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
882 // If we perform two allocations from the same bucket now, we expect to
883 // refill both the nearly full pages.
884 (void) partitionAlloc(allocator.root(), kTestAllocSize);
885 (void) partitionAlloc(allocator.root(), kTestAllocSize);
// The third (non-full) page should be untouched: still exactly one slot used.
886 EXPECT_EQ(1, page->numAllocatedSlots);
895 // Basic tests to ensure that allocations work for partial page buckets.
896 TEST(PartitionAllocTest, PartialPages)
// A "partial page" bucket is one whose slot span does not occupy a whole
// number of partition pages (numSystemPagesPerSlotSpan not a multiple of
// kNumSystemPagesPerPartitionPage). The loop scans slot sizes until it
// finds such a bucket, then exercises it with two full-page fills.
// NOTE(review): the loop's break/closing brace and the FreeFullPage
// teardown are not visible in this chunk; code here is unchanged.
900 // Find a size that is backed by a partial partition page.
901 size_t size = sizeof(void*);
902 WTF::PartitionBucket* bucket = 0;
903 while (size < kTestMaxAllocation) {
904 bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
905 if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartitionPage)
907 size += sizeof(void*);
// Guard: the scan must have found a qualifying size before the cap.
909 EXPECT_LT(size, kTestMaxAllocation);
911 WTF::PartitionPage* page1 = GetFullPage(size);
912 WTF::PartitionPage* page2 = GetFullPage(size);
919 // Test correct handling if our mapping collides with another.
920 TEST(PartitionAllocTest, MappingCollision)
// Fills an entire super page with partition pages, then maps inaccessible
// single pages immediately before and after that super page so that the
// kernel cannot hand back a naturally aligned region for the next
// reservation. Verifies the allocator still produces correctly aligned
// partition pages in a brand-new super page.
// NOTE(review): the declaration of loop index `i` (original line ~929) and
// some ASSERT/brace lines are not visible in this chunk; code unchanged.
923 // The -2 is because the first and last partition pages in a super page are
925 size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
926 OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
927 OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
// Consume every usable partition page of the first super page.
930 for (i = 0; i < numPartitionPagesNeeded; ++i)
931 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
933 char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
// First usable partition page sits one kPartitionPageSize into the super
// page (offset 0 is the reserved guard/metadata page).
934 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
935 pageBase -= WTF::kPartitionPageSize;
936 // Map a single system page either side of the mapping for our allocations,
937 // with the goal of tripping up alignment of the next mapping.
938 void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
940 void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
942 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
943 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
// Force a second super page to be reserved while the neighbors are blocked.
945 for (i = 0; i < numPartitionPagesNeeded; ++i)
946 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
948 WTF::freePages(map1, WTF::kPageAllocationGranularity);
949 WTF::freePages(map2, WTF::kPageAllocationGranularity);
951 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
952 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
953 pageBase -= WTF::kPartitionPageSize;
954 // Map a single system page either side of the mapping for our allocations,
955 // with the goal of tripping up alignment of the next mapping.
956 map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
958 map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
960 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
961 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
// Trigger a third super-page reservation under the same adversarial layout.
963 WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
964 WTF::freePages(map1, WTF::kPageAllocationGranularity);
965 WTF::freePages(map2, WTF::kPageAllocationGranularity);
// The page handed back must still be partition-page aligned.
967 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);
969 // And make sure we really did get a page in a new superpage.
970 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
971 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
// Cleanup: release everything grabbed above.
973 FreeFullPage(pageInThirdSuperPage);
974 for (i = 0; i < numPartitionPagesNeeded; ++i) {
975 FreeFullPage(firstSuperPagePages[i]);
976 FreeFullPage(secondSuperPagePages[i]);
982 // Tests that pages in the free page cache do get freed as appropriate.
983 TEST(PartitionAllocTest, FreeCache)
// Allocates the largest slot this partition supports so one allocation owns
// a whole page, frees it (landing the page in the free cache rather than
// being unmapped immediately), then cycles the cache to prove the page's
// memory is eventually decommitted — and that a page bouncing between
// empty and in-use never gets decommitted.
// NOTE(review): several partitionFree calls (e.g. between original lines
// 995/997 and inside the loop at 1018/1020) are not visible in this chunk;
// code here is unchanged.
987 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
988 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
989 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
991 void* ptr = partitionAlloc(allocator.root(), bigSize);
993 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
994 EXPECT_EQ(0, bucket->freePagesHead);
995 EXPECT_EQ(1, page->numAllocatedSlots);
// After freeing (free call not visible here): page is empty but cached —
// a valid freeCacheIndex and a live freelist mean memory is still mapped.
997 EXPECT_EQ(0, page->numAllocatedSlots);
998 EXPECT_NE(-1, page->freeCacheIndex);
999 EXPECT_TRUE(page->freelistHead);
1001 CycleFreeCache(kTestAllocSize);
1003 // Flushing the cache should have really freed the unused page.
1004 EXPECT_FALSE(page->freelistHead);
1005 EXPECT_EQ(-1, page->freeCacheIndex);
1006 EXPECT_EQ(0, page->numAllocatedSlots);
1008 // Check that an allocation works ok whilst in this state (a free'd page
1009 // as the active pages head).
1010 ptr = partitionAlloc(allocator.root(), bigSize);
1011 EXPECT_FALSE(bucket->freePagesHead);
1014 // Also check that a page that is bouncing immediately between empty and
1015 // used does not get freed.
1016 for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) {
1017 ptr = partitionAlloc(allocator.root(), bigSize);
1018 EXPECT_TRUE(page->freelistHead);
1020 EXPECT_TRUE(page->freelistHead);
1026 // Tests for a bug we had with losing references to free pages.
1027 TEST(PartitionAllocTest, LostFreePagesBug)
// Regression test: sets up a bucket whose active-pages list starts with a
// freed page while another freed page sits on the free-pages list, then
// allocates — the bug lost the reference to the free page during the
// active-list scan.
// NOTE(review): interleaved free/ASSERT lines (e.g. original 1034, 1036,
// 1065-1067) are not visible in this chunk; code here is unchanged.
1031 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
1033 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
1035 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
1038 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
1039 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr2));
1040 WTF::PartitionBucket* bucket = page->bucket;
1042 EXPECT_EQ(0, bucket->freePagesHead);
// -1 numAllocatedSlots marks a full page that has been removed from the
// active list; page2 is the current active page with one slot in use.
1043 EXPECT_EQ(-1, page->numAllocatedSlots);
1044 EXPECT_EQ(1, page2->numAllocatedSlots);
1046 partitionFreeGeneric(genericAllocator.root(), ptr);
1047 partitionFreeGeneric(genericAllocator.root(), ptr2);
// Both pages are now empty but retain their freelists (not yet decommitted).
1049 EXPECT_EQ(0, bucket->freePagesHead);
1050 EXPECT_EQ(0, page->numAllocatedSlots);
1051 EXPECT_EQ(0, page2->numAllocatedSlots);
1052 EXPECT_TRUE(page->freelistHead);
1053 EXPECT_TRUE(page2->freelistHead);
1055 CycleGenericFreeCache(kTestAllocSize);
// Cycling the cache decommits both empty pages' memory...
1057 EXPECT_FALSE(page->freelistHead);
1058 EXPECT_FALSE(page2->freelistHead);
// ...but they remain linked on the bucket's active list, not the free list.
1060 EXPECT_FALSE(bucket->freePagesHead);
1061 EXPECT_TRUE(bucket->activePagesHead);
1062 EXPECT_TRUE(bucket->activePagesHead->nextPage);
1064 // At this moment, we have two freed pages, on the freelist.
1066 ptr = partitionAllocGeneric(genericAllocator.root(), size);
1068 partitionFreeGeneric(genericAllocator.root(), ptr);
1070 EXPECT_TRUE(bucket->activePagesHead);
1071 EXPECT_TRUE(bucket->freePagesHead);
1073 CycleGenericFreeCache(kTestAllocSize);
1075 // We're now set up to trigger the bug by scanning over the active pages
1076 // list, where the current active page is freed, and there exists at least
1077 // one freed page in the free pages list.
1078 ptr = partitionAllocGeneric(genericAllocator.root(), size);
1080 partitionFreeGeneric(genericAllocator.root(), ptr);
// With the fix, both lists still hold pages; the freed page was not lost.
1082 EXPECT_TRUE(bucket->activePagesHead);
1083 EXPECT_TRUE(bucket->freePagesHead);
1090 // Make sure that malloc(-1) dies.
1091 // In the past, we had an integer overflow that would alias malloc(-1) to
1092 // malloc(0), which is not good.
1093 TEST(PartitionAllocDeathTest, LargeAllocs)
// Death test: requesting size_t(-1) (and anything above INT_MAX) must trap
// instead of wrapping around to a tiny allocation.
1097 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
1098 // And the smallest allocation we expect to die.
1099 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");
1104 // Check that our immediate double-free detection works.
1105 TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
// Death test: freeing the same pointer twice in a row must be detected
// (the slot is at the freelist head, so the second free is caught directly).
1109 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1111 partitionFreeGeneric(genericAllocator.root(), ptr);
1113 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1118 // Check that our refcount-based double-free detection works.
1119 TEST(PartitionAllocDeathTest, RefcountDoubleFree)
// Death test: a non-immediate double free (another free happens in between,
// so ptr is no longer at the freelist head) is caught when the page's
// allocated-slot count would drop below zero.
1123 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1125 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1127 partitionFreeGeneric(genericAllocator.root(), ptr);
1128 partitionFreeGeneric(genericAllocator.root(), ptr2);
1129 // This is not an immediate double-free so our immediate detection won't
1130 // fire. However, it does take the "refcount" of the partition page to -1,
1131 // which is illegal and should be trapped.
1132 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1137 // Check that guard pages are present where expected.
1138 TEST(PartitionAllocDeathTest, GuardPages)
// Death test: a direct-mapped (super-bucketed) allocation must be fenced by
// inaccessible guard pages, so writing one byte before or one byte past the
// usable region faults.
1142 // This large size will result in a direct mapped allocation with guard
1143 // pages at either end.
1144 size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllocSize;
1145 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
// Back up over the cookie/offset to the true start of the slot.
1147 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1149 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1150 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1152 partitionFreeGeneric(genericAllocator.root(), ptr);
1157 #endif // !OS(ANDROID)
1159 // Tests that the countLeadingZeros() functions work to our satisfaction.
1160 // It doesn't seem worth the overhead of a whole new file for these tests, so
1161 // we'll put them here since partitionAllocGeneric will depend heavily on these
1162 // functions working correctly.
1163 TEST(PartitionAllocTest, CLZWorks)
// Sanity-checks the count-leading-zeros helpers at the boundary values:
// 0 (all bits clear), 1 (only lowest bit), and the top bits of the word.
1165 EXPECT_EQ(32u, WTF::countLeadingZeros32(0));
1166 EXPECT_EQ(31u, WTF::countLeadingZeros32(1));
1167 EXPECT_EQ(1u, WTF::countLeadingZeros32(1 << 30));
1168 EXPECT_EQ(0u, WTF::countLeadingZeros32(1 << 31));
// NOTE(review): the two groups below expect size_t to be 64-bit and 32-bit
// respectively — in the full file they are presumably separated by a
// CPU-bitness #if/#else (guard lines not visible in this chunk); confirm
// against the original before editing.
1171 EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
1172 EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
1173 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
1174 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
1175 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
1177 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0));
1178 EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1));
1179 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1 << 30));
1180 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1 << 31));
1186 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)