2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "wtf/PartitionAlloc.h"
34 #include "wtf/BitwiseOperations.h"
36 #include "wtf/OwnPtr.h"
37 #include "wtf/PassOwnPtr.h"
38 #include <gtest/gtest.h>
44 #include <sys/resource.h>
48 #define MAP_ANONYMOUS MAP_ANON
// Test fixture globals: one size-specific partition (capped at
// kTestMaxAllocation) and one generic partition, shared by all tests below.
52 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
56 static const size_t kTestMaxAllocation = 4096;
57 static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
58 static PartitionAllocatorGeneric genericAllocator;
60 static const size_t kTestAllocSize = 16;
// NOTE(review): kPointerOffset / kExtraAllocSize are declared twice below.
// The fused original line numbers skip here (63 -> 65), so the #if/#else
// that selected between the no-cookie and cookie variants appears to have
// been lost in extraction -- restore it before this file will compile.
62 static const size_t kPointerOffset = 0;
63 static const size_t kExtraAllocSize = 0;
65 static const size_t kPointerOffset = WTF::kCookieSize;
66 static const size_t kExtraAllocSize = WTF::kCookieSize * 2;
// Bytes actually consumed per kTestAllocSize allocation, and the bucket
// index that size maps to in the size-specific partition.
68 static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
69 static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
// Per-test setup: initializes the partition roots used by the tests.
// NOTE(review): original line numbers skip (71 -> 74); the opening brace
// and, presumably, allocator.init() are missing from this extraction.
71 static void TestSetup()
74 genericAllocator.init();
// Per-test teardown: exercises the stats-dumping code, then verifies both
// partitions shut down reporting no leaked allocations.
77 static void TestShutdown()
80 // Test that the partition statistic dumping code works. Previously, it
81 // bitrotted because no test calls it.
82 partitionDumpStats(*allocator.root())
85 // We expect no leaks in the general case. We have a test for leak
87 EXPECT_TRUE(allocator.shutdown());
88 EXPECT_TRUE(genericAllocator.shutdown());
// Caps the process address space (RLIMIT_AS) at 4GB so later tests can
// provoke allocation failure. Returns whether the limit is in effect.
// NOTE(review): original line numbers skip throughout; braces, early
// returns, and the struct rlimit declaration are missing from this
// extraction.
91 static bool SetAddressSpaceLimit()
94 // 32 bits => address space is limited already.
97 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024;
99 if (getrlimit(RLIMIT_AS, &limit) != 0)
101 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
102 limit.rlim_cur = kAddressSpaceLimit;
103 if (setrlimit(RLIMIT_AS, &limit) != 0)
// Restores the soft address-space limit back to the hard maximum.
// Returns whether the restore succeeded.
112 static bool ClearAddressSpaceLimit()
118 if (getrlimit(RLIMIT_AS, &limit) != 0)
120 limit.rlim_cur = limit.rlim_max;
121 if (setrlimit(RLIMIT_AS, &limit) != 0)
// Allocates enough objects of |size| to exactly fill one slot span, and
// returns the now-full active page. Also sanity-checks that the first and
// last slots land in the same page. (Original line numbers skip in places;
// braces and the first/last declarations are missing from this extraction.)
129 static WTF::PartitionPage* GetFullPage(size_t size)
131 size_t realSize = size + kExtraAllocSize;
132 size_t bucketIdx = realSize >> WTF::kBucketShift;
133 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
134 size_t numSlots = (bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / realSize;
138 for (i = 0; i < numSlots; ++i) {
139 void* ptr = partitionAlloc(allocator.root(), size);
// Record the first and last slots (cookie-adjusted) so we can verify
// below that the whole span stayed within one page.
142 first = WTF::partitionCookieFreePointerAdjust(ptr);
143 else if (i == numSlots - 1)
144 last = WTF::partitionCookieFreePointerAdjust(ptr);
146 EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last));
147 if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage)
148 EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
149 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
150 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
151 EXPECT_TRUE(bucket->activePagesHead);
152 EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage);
153 return bucket->activePagesHead;
// Frees every slot in |page| by walking the slot span linearly.
156 static void FreeFullPage(WTF::PartitionPage* page)
158 size_t size = page->bucket->slotSize;
159 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size;
// abs(): looks like numAllocatedSlots can be stored negated for a full
// page -- NOTE(review): confirm against the PartitionPage bookkeeping.
160 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
161 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
163 for (i = 0; i < numSlots; ++i) {
164 partitionFree(ptr + kPointerOffset);
// Allocates and frees a single |size| object kMaxFreeableSpans times so the
// bucket's empty page cycles through the free-page cache.
169 static void CycleFreeCache(size_t size)
171 size_t realSize = size + kExtraAllocSize;
172 size_t bucketIdx = realSize >> WTF::kBucketShift;
173 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
174 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
176 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
177 void* ptr = partitionAlloc(allocator.root(), size);
178 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
// NOTE(review): the matching partitionFree(ptr) call is missing from this
// extraction (original line numbers skip 179).
180 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
181 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
// Generic-partition counterpart of CycleFreeCache(): alloc/free a |size|
// object kMaxFreeableSpans times to cycle its page through the free cache.
185 static void CycleGenericFreeCache(size_t size)
187 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
188 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
189 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
190 WTF::PartitionBucket* bucket = page->bucket;
191 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
192 partitionFreeGeneric(genericAllocator.root(), ptr);
193 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
194 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
198 // Check that the most basic of allocate / free pairs work.
199 TEST(PartitionAllocTest, Basic)
202 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
203 WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage;
// Before the first allocation, the bucket points at the shared seed page
// and has no free pages.
205 EXPECT_FALSE(bucket->freePagesHead);
206 EXPECT_EQ(seedPage, bucket->activePagesHead);
207 EXPECT_EQ(0, bucket->activePagesHead->nextPage);
209 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
211 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
212 // Check that the offset appears to include a guard page.
213 EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);
216 // Expect that the last active page does not get tossed to the freelist.
217 EXPECT_FALSE(bucket->freePagesHead);
222 // Check that we can detect a memory leak.
223 TEST(PartitionAllocTest, SimpleLeak)
226 void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
228 void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
// The two pointers above are deliberately never freed: shutdown() must
// detect the live allocations and report failure.
230 EXPECT_FALSE(allocator.shutdown());
231 EXPECT_FALSE(genericAllocator.shutdown());
234 // Test multiple allocations, and freelist handling.
235 TEST(PartitionAllocTest, MultiAlloc)
239 char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
240 char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
// Consecutive allocations from a fresh page should be exactly one slot
// (kRealAllocSize bytes) apart.
243 ptrdiff_t diff = ptr2 - ptr1;
244 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
246 // Check that we re-use the just-freed slot.
248 ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
251 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
253 ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
256 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
258 char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
// With ptr1 and ptr2 both live, a third allocation lands two slots away.
261 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
270 // Test a bucket with multiple pages.
271 TEST(PartitionAllocTest, MultiPages)
274 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
276 WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
// NOTE(review): original line numbers skip (276 -> 278); a FreeFullPage(page)
// call appears to be missing here, given the zero-allocated check below.
278 EXPECT_FALSE(bucket->freePagesHead);
279 EXPECT_EQ(page, bucket->activePagesHead);
280 EXPECT_EQ(0, page->nextPage);
281 EXPECT_EQ(0, page->numAllocatedSlots);
283 page = GetFullPage(kTestAllocSize);
284 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
286 EXPECT_EQ(page2, bucket->activePagesHead);
287 EXPECT_EQ(0, page2->nextPage);
288 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);
290 // Fully free the non-current page. It should not be freelisted because
291 // there is no other immediately usable page. The other page is full.
293 EXPECT_EQ(0, page->numAllocatedSlots);
294 EXPECT_FALSE(bucket->freePagesHead);
295 EXPECT_EQ(page, bucket->activePagesHead);
297 // Allocate a new page, it should pull from the freelist.
298 page = GetFullPage(kTestAllocSize);
299 EXPECT_FALSE(bucket->freePagesHead);
300 EXPECT_EQ(page, bucket->activePagesHead);
304 EXPECT_EQ(0, page->numAllocatedSlots);
305 EXPECT_EQ(0, page2->numAllocatedSlots);
306 EXPECT_EQ(0, page2->numUnprovisionedSlots);
307 EXPECT_NE(-1, page2->freeCacheIndex);
312 // Test some finer aspects of internal page transitions.
// NOTE(review): original line numbers skip in several places below; the
// partitionFree() calls that trigger each transition are missing from this
// extraction.
313 TEST(PartitionAllocTest, PageTransitions)
316 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
318 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
319 EXPECT_EQ(page1, bucket->activePagesHead);
320 EXPECT_EQ(0, page1->nextPage);
321 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
322 EXPECT_EQ(page2, bucket->activePagesHead);
323 EXPECT_EQ(0, page2->nextPage);
325 // Bounce page1 back into the non-full list then fill it up again.
326 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
328 EXPECT_EQ(page1, bucket->activePagesHead);
329 (void) partitionAlloc(allocator.root(), kTestAllocSize);
330 EXPECT_EQ(page1, bucket->activePagesHead);
331 EXPECT_EQ(page2, bucket->activePagesHead->nextPage);
333 // Allocating another page at this point should cause us to scan over page1
334 // (which is both full and NOT our current page), and evict it from the
335 // freelist. Older code had a O(n^2) condition due to failure to do this.
336 WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
337 EXPECT_EQ(page3, bucket->activePagesHead);
338 EXPECT_EQ(0, page3->nextPage);
340 // Work out a pointer into page2 and free it.
341 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
343 // Trying to allocate at this time should cause us to cycle around to page2
344 // and find the recently freed slot.
345 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
346 EXPECT_EQ(ptr, newPtr);
347 EXPECT_EQ(page2, bucket->activePagesHead);
348 EXPECT_EQ(page3, page2->nextPage);
350 // Work out a pointer into page1 and free it. This should pull the page
351 // back into the list of available pages.
352 ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
354 // This allocation should be satisfied by page1.
355 newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
356 EXPECT_EQ(ptr, newPtr);
357 EXPECT_EQ(page1, bucket->activePagesHead);
358 EXPECT_EQ(page2, page1->nextPage);
364 // Allocating whilst in this state exposed a bug, so keep the test.
365 ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
371 // Test some corner cases relating to page transitions in the internal
372 // free page list metadata bucket.
373 TEST(PartitionAllocTest, FreePageListPageTransitions)
376 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
378 size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
379 // The +1 is because we need to account for the fact that the current page
380 // never gets thrown on the freelist.
381 ++numToFillFreeListPage;
382 OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);
385 for (i = 0; i < numToFillFreeListPage; ++i) {
386 pages[i] = GetFullPage(kTestAllocSize);
388 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
389 for (i = 0; i < numToFillFreeListPage; ++i)
390 FreeFullPage(pages[i]);
// After freeing everything, the head page stays active (never freelisted)
// and the next page has been pushed into the free cache.
391 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
392 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
393 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
394 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
396 // Allocate / free in a different bucket size so we get control of a
397 // different free page list. We need two pages because one will be the last
398 // active page and not get freed.
399 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
400 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
// NOTE(review): original line numbers skip (400 -> 404); the FreeFullPage
// calls for page1/page2 appear to be missing from this extraction.
404 // If we re-allocate all kTestAllocSize allocations, we'll pull all the
405 // free pages and end up freeing the first page for free page objects.
406 // It's getting a bit tricky but a nice re-entrancy is going on:
407 // alloc(kTestAllocSize) -> pulls page from free page list ->
408 // free(PartitionFreepagelistEntry) -> last entry in page freed ->
409 // alloc(PartitionFreepagelistEntry).
410 for (i = 0; i < numToFillFreeListPage; ++i) {
411 pages[i] = GetFullPage(kTestAllocSize);
413 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
415 // As part of the final free-up, we'll test another re-entrancy:
416 // free(kTestAllocSize) -> last entry in page freed ->
417 // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
418 // free(PartitionFreepagelistEntry)
419 for (i = 0; i < numToFillFreeListPage; ++i)
420 FreeFullPage(pages[i]);
421 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
422 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
423 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
424 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
429 // Test a large series of allocations that cross more than one underlying
430 // 64KB super page allocation.
431 TEST(PartitionAllocTest, MultiPageAllocs)
434 // This is guaranteed to cross a super page boundary because the first
435 // partition page "slot" will be taken up by a guard page.
436 size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
437 // The super page should begin and end in a guard, so we need one less page
438 // in order to allocate a single page in the new super page.
441 EXPECT_GT(numPagesNeeded, 1u);
442 OwnPtr<WTF::PartitionPage*[]> pages;
443 pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
444 uintptr_t firstSuperPageBase = 0;
446 for (i = 0; i < numPagesNeeded; ++i) {
447 pages[i] = GetFullPage(kTestAllocSize);
448 void* storagePtr = partitionPageToPointer(pages[i]);
// Capture the base of the first super page on the first iteration, then
// compare against the final page, which must live in a new super page.
450 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
451 if (i == numPagesNeeded - 1) {
452 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
453 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
454 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
455 // Check that we allocated a guard page for the second page.
456 EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
459 for (i = 0; i < numPagesNeeded; ++i)
460 FreeFullPage(pages[i]);
465 // Test the generic allocation functions that can handle arbitrary sizes and
// reallocations.
// NOTE(review): original line numbers skip in many places below; EXPECT
// null-checks, memset()/char-write lines that set the 'A'..'F' markers, and
// several partitionFreeGeneric calls are missing from this extraction.
467 TEST(PartitionAllocTest, GenericAlloc)
471 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
473 partitionFreeGeneric(genericAllocator.root(), ptr);
474 ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1);
476 partitionFreeGeneric(genericAllocator.root(), ptr);
478 ptr = partitionAllocGeneric(genericAllocator.root(), 1);
481 char* charPtr = static_cast<char*>(ptr);
484 // Change the size of the realloc, remaining inside the same bucket.
485 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
486 EXPECT_EQ(ptr, newPtr);
487 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
488 EXPECT_EQ(ptr, newPtr);
489 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket);
490 EXPECT_EQ(ptr, newPtr);
492 // Change the size of the realloc, switching buckets.
493 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket + 1);
494 EXPECT_NE(newPtr, ptr);
495 // Check that the realloc copied correctly.
496 char* newCharPtr = static_cast<char*>(newPtr);
497 EXPECT_EQ(*newCharPtr, 'A');
499 // Subtle: this checks for an old bug where we copied too much from the
500 // source of the realloc. The condition can be detected by a trashing of
501 // the uninitialized value in the space of the upsized allocation.
502 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket)));
505 // The realloc moved. To check that the old allocation was freed, we can
506 // do an alloc of the old allocation size and check that the old allocation
507 // address is at the head of the freelist and reused.
508 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
509 EXPECT_EQ(reusedPtr, origPtr);
510 partitionFreeGeneric(genericAllocator.root(), reusedPtr);
512 // Downsize the realloc.
514 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
515 EXPECT_EQ(newPtr, origPtr);
516 newCharPtr = static_cast<char*>(newPtr);
517 EXPECT_EQ(*newCharPtr, 'B');
520 // Upsize the realloc to outside the partition.
522 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 1);
523 EXPECT_NE(newPtr, ptr);
524 newCharPtr = static_cast<char*>(newPtr);
525 EXPECT_EQ(*newCharPtr, 'C');
528 // Upsize and downsize the realloc, remaining outside the partition.
530 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 10);
531 newCharPtr = static_cast<char*>(newPtr);
532 EXPECT_EQ(*newCharPtr, 'D');
535 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 2);
536 newCharPtr = static_cast<char*>(newPtr);
537 EXPECT_EQ(*newCharPtr, 'E');
540 // Downsize the realloc to inside the partition.
542 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
543 EXPECT_NE(newPtr, ptr);
544 EXPECT_EQ(newPtr, origPtr);
545 newCharPtr = static_cast<char*>(newPtr);
546 EXPECT_EQ(*newCharPtr, 'F');
548 partitionFreeGeneric(genericAllocator.root(), newPtr);
552 // Test the generic allocation functions can handle some specific sizes of
// interest (zero, exactly one-per-page, multi-page slot spans, and huge
// direct-mapped allocations).
554 TEST(PartitionAllocTest, GenericAllocSizes)
558 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
560 partitionFreeGeneric(genericAllocator.root(), ptr);
562 // kPartitionPageSize is interesting because it results in just one
563 // allocation per page, which tripped up some corner cases.
564 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
565 ptr = partitionAllocGeneric(genericAllocator.root(), size);
567 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
569 partitionFreeGeneric(genericAllocator.root(), ptr);
570 // Should be freeable at this point.
571 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
572 EXPECT_NE(-1, page->freeCacheIndex);
573 partitionFreeGeneric(genericAllocator.root(), ptr2);
// A size just under half a maximal slot span: two such objects fit per
// span, so ptr/ptr2 share a page while ptr3/ptr4 land on a second page.
575 size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WTF::kSystemPageSize) / 2) - kExtraAllocSize;
576 ptr = partitionAllocGeneric(genericAllocator.root(), size);
578 memset(ptr, 'A', size);
579 ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
581 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
583 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
586 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
587 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr3));
588 EXPECT_NE(page, page2);
590 partitionFreeGeneric(genericAllocator.root(), ptr);
591 partitionFreeGeneric(genericAllocator.root(), ptr3);
592 partitionFreeGeneric(genericAllocator.root(), ptr2);
593 // Should be freeable at this point.
594 EXPECT_NE(-1, page->freeCacheIndex);
595 EXPECT_EQ(0, page->numAllocatedSlots);
596 EXPECT_EQ(0, page->numUnprovisionedSlots);
597 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
598 EXPECT_EQ(ptr3, newPtr);
599 newPtr = partitionAllocGeneric(genericAllocator.root(), size);
600 EXPECT_EQ(ptr2, newPtr);
601 #if OS(LINUX) && !ENABLE(ASSERT)
602 // On Linux, we have a guarantee that freelisting a page should cause its
603 // contents to be nulled out. We check for null here to detect a bug we
604 // had where a large slot size was causing us to not properly free all
605 // resources back to the system.
606 // We only run the check when asserts are disabled because when they are
607 // enabled, the allocated area is overwritten with an "uninitialized"
609 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
611 partitionFreeGeneric(genericAllocator.root(), newPtr);
612 partitionFreeGeneric(genericAllocator.root(), ptr3);
613 partitionFreeGeneric(genericAllocator.root(), ptr4);
615 // Can we allocate a massive (512MB) size?
616 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
617 partitionFreeGeneric(genericAllocator.root(), ptr);
619 // Check a more reasonable, but still direct mapped, size.
620 // Chop a system page and a byte off to test for rounding errors.
621 size = 20 * 1024 * 1024;
622 size -= WTF::kSystemPageSize;
624 ptr = partitionAllocGeneric(genericAllocator.root(), size);
625 char* charPtr = reinterpret_cast<char*>(ptr);
626 *(charPtr + (size - 1)) = 'A';
627 partitionFreeGeneric(genericAllocator.root(), ptr);
// Freeing null must be a harmless no-op.
630 partitionFreeGeneric(genericAllocator.root(), 0);
632 // Do we correctly get a null for a failed allocation?
633 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));
638 // Test that we can fetch the real allocated size after an allocation.
// For each request size: predict the actual size up front with
// partitionAllocActualSize(), allocate, and check partitionAllocGetSize()
// agrees with the prediction.
639 TEST(PartitionAllocTest, GenericAllocGetSize)
644 size_t requestedSize, actualSize, predictedSize;
646 EXPECT_TRUE(partitionAllocSupportsGetSize());
648 // Allocate something small.
649 requestedSize = 511 - kExtraAllocSize;
650 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
651 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
653 actualSize = partitionAllocGetSize(ptr);
654 EXPECT_EQ(predictedSize, actualSize);
655 EXPECT_LT(requestedSize, actualSize);
656 partitionFreeGeneric(genericAllocator.root(), ptr);
658 // Allocate a size that should be a perfect match for a bucket, because it
659 // is an exact power of 2.
660 requestedSize = (256 * 1024) - kExtraAllocSize;
661 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
662 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
664 actualSize = partitionAllocGetSize(ptr);
665 EXPECT_EQ(predictedSize, actualSize);
666 EXPECT_EQ(requestedSize, actualSize);
667 partitionFreeGeneric(genericAllocator.root(), ptr);
669 // Allocate a size that is a system page smaller than a bucket. GetSize()
670 // should return a larger size than we asked for now.
671 requestedSize = (256 * 1024) - WTF::kSystemPageSize - kExtraAllocSize;
672 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
673 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
675 actualSize = partitionAllocGetSize(ptr);
676 EXPECT_EQ(predictedSize, actualSize);
677 EXPECT_EQ(requestedSize + WTF::kSystemPageSize, actualSize);
678 // Check that we can write at the end of the reported size too.
679 char* charPtr = reinterpret_cast<char*>(ptr);
680 *(charPtr + (actualSize - 1)) = 'A';
681 partitionFreeGeneric(genericAllocator.root(), ptr);
683 // Allocate something very large, and uneven.
684 requestedSize = 512 * 1024 * 1024 - 1;
685 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
686 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
688 actualSize = partitionAllocGetSize(ptr);
689 EXPECT_EQ(predictedSize, actualSize);
690 EXPECT_LT(requestedSize, actualSize);
691 partitionFreeGeneric(genericAllocator.root(), ptr);
693 // Too large allocation.
694 requestedSize = INT_MAX;
695 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
696 EXPECT_EQ(requestedSize, predictedSize);
701 // Test the realloc() contract.
702 TEST(PartitionAllocTest, Realloc)
706 // realloc(0, size) should be equivalent to malloc().
707 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
708 memset(ptr, 'A', kTestAllocSize);
709 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
710 // realloc(ptr, 0) should be equivalent to free().
711 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
713 EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead);
715 // Test that growing an allocation with realloc() copies everything from the
717 size_t size = WTF::kSystemPageSize - kExtraAllocSize;
718 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
719 ptr = partitionAllocGeneric(genericAllocator.root(), size);
720 memset(ptr, 'A', size);
721 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
722 EXPECT_NE(ptr, ptr2);
723 char* charPtr2 = static_cast<char*>(ptr2);
724 EXPECT_EQ('A', charPtr2[0]);
725 EXPECT_EQ('A', charPtr2[size - 1]);
// The byte past the old size must NOT have been copied (still the
// allocator's uninitialized fill value).
727 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
730 // Test that shrinking an allocation with realloc() also copies everything
731 // from the old allocation.
732 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
733 EXPECT_NE(ptr2, ptr);
734 char* charPtr = static_cast<char*>(ptr);
735 EXPECT_EQ('A', charPtr[0]);
736 EXPECT_EQ('A', charPtr[size - 2]);
738 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
741 partitionFreeGeneric(genericAllocator.root(), ptr);
743 // Test that shrinking a direct mapped allocation happens in-place.
744 size = WTF::kGenericMaxBucketed + 16 * WTF::kSystemPageSize;
745 ptr = partitionAllocGeneric(genericAllocator.root(), size);
746 size_t actualSize = partitionAllocGetSize(ptr);
747 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 8 * WTF::kSystemPageSize);
748 EXPECT_EQ(ptr, ptr2);
749 EXPECT_EQ(actualSize - 8 * WTF::kSystemPageSize, partitionAllocGetSize(ptr2));
751 // Test that a previously in-place shrunk direct mapped allocation can be
752 // expanded up again within its original size.
753 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - WTF::kSystemPageSize);
754 EXPECT_EQ(ptr2, ptr);
755 EXPECT_EQ(actualSize - WTF::kSystemPageSize, partitionAllocGetSize(ptr));
757 // Test that a direct mapped allocation is performed not in-place when the
758 // new size is small enough.
759 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kSystemPageSize);
760 EXPECT_NE(ptr, ptr2);
762 partitionFreeGeneric(genericAllocator.root(), ptr2);
767 // Tests the handing out of freelists for partial pages.
// NOTE(review): original line numbers skip in several places below; EXPECT
// null-checks and the partitionFree() calls for the churn section are
// missing from this extraction.
768 TEST(PartitionAllocTest, PartialPageFreelists)
772 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
773 EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
774 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
775 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
776 EXPECT_EQ(0, bucket->freePagesHead);
778 void* ptr = partitionAlloc(allocator.root(), bigSize);
781 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
782 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (bigSize + kExtraAllocSize);
783 EXPECT_EQ(4u, totalSlots);
784 // The freelist should have one entry, because we were able to exactly fit
785 // one object slot and one freelist pointer (the null that the head points
786 // to) into a system page.
787 EXPECT_TRUE(page->freelistHead);
788 EXPECT_EQ(1, page->numAllocatedSlots);
789 EXPECT_EQ(2, page->numUnprovisionedSlots);
791 void* ptr2 = partitionAlloc(allocator.root(), bigSize);
793 EXPECT_FALSE(page->freelistHead);
794 EXPECT_EQ(2, page->numAllocatedSlots);
795 EXPECT_EQ(2, page->numUnprovisionedSlots);
797 void* ptr3 = partitionAlloc(allocator.root(), bigSize);
799 EXPECT_TRUE(page->freelistHead);
800 EXPECT_EQ(3, page->numAllocatedSlots);
801 EXPECT_EQ(0, page->numUnprovisionedSlots);
803 void* ptr4 = partitionAlloc(allocator.root(), bigSize);
805 EXPECT_FALSE(page->freelistHead);
806 EXPECT_EQ(4, page->numAllocatedSlots);
807 EXPECT_EQ(0, page->numUnprovisionedSlots);
// A fifth allocation no longer fits in the first page's slot span, so it
// must come from a second page.
809 void* ptr5 = partitionAlloc(allocator.root(), bigSize);
812 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
813 EXPECT_EQ(1, page2->numAllocatedSlots);
815 // Churn things a little whilst there's a partial page freelist.
817 ptr = partitionAlloc(allocator.root(), bigSize);
818 void* ptr6 = partitionAlloc(allocator.root(), bigSize);
826 EXPECT_NE(-1, page->freeCacheIndex);
827 EXPECT_NE(-1, page2->freeCacheIndex);
828 EXPECT_TRUE(page2->freelistHead);
829 EXPECT_EQ(0, page2->numAllocatedSlots);
831 // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
832 size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize;
833 bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
834 bucket = &allocator.root()->buckets()[bucketIdx];
835 EXPECT_EQ(0, bucket->freePagesHead);
837 ptr = partitionAlloc(allocator.root(), mediumSize);
839 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
840 EXPECT_EQ(1, page->numAllocatedSlots);
841 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (mediumSize + kExtraAllocSize);
842 size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
843 EXPECT_EQ(2u, firstPageSlots);
844 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
848 size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize;
849 bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
850 bucket = &allocator.root()->buckets()[bucketIdx];
851 EXPECT_EQ(0, bucket->freePagesHead);
853 ptr = partitionAlloc(allocator.root(), smallSize);
855 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
856 EXPECT_EQ(1, page->numAllocatedSlots);
857 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (smallSize + kExtraAllocSize);
858 firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
859 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
862 EXPECT_TRUE(page->freelistHead);
863 EXPECT_EQ(0, page->numAllocatedSlots);
865 size_t verySmallSize = 32 - kExtraAllocSize;
866 bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
867 bucket = &allocator.root()->buckets()[bucketIdx];
868 EXPECT_EQ(0, bucket->freePagesHead);
870 ptr = partitionAlloc(allocator.root(), verySmallSize);
872 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
873 EXPECT_EQ(1, page->numAllocatedSlots);
874 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (verySmallSize + kExtraAllocSize);
875 firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
876 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
879 EXPECT_TRUE(page->freelistHead);
880 EXPECT_EQ(0, page->numAllocatedSlots);
882 // And try an allocation size (against the generic allocator) that is
883 // larger than a system page.
884 size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2)) - kExtraAllocSize;
885 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
887 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
888 EXPECT_EQ(1, page->numAllocatedSlots);
889 EXPECT_TRUE(page->freelistHead);
890 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
891 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
892 partitionFreeGeneric(genericAllocator.root(), ptr);
894 // And then make sure that exactly the page size only faults one page.
895 size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize;
896 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
898 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
899 EXPECT_EQ(1, page->numAllocatedSlots);
900 EXPECT_FALSE(page->freelistHead);
901 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageSize + kExtraAllocSize);
902 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
903 partitionFreeGeneric(genericAllocator.root(), ptr);
908 // Test some of the fragmentation-resistant properties of the allocator.
// After two full pages, a third allocation must land on a fresh (non-full)
// page; freeing one slot back into each full page should make both eligible
// for refilling.
909 TEST(PartitionAllocTest, PageRefilling)
912 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
914 // Grab two full pages and a non-full page.
915 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
916 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
917 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
// Neither full page should still be at the head of the active pages list.
919 EXPECT_NE(page1, bucket->activePagesHead);
920 EXPECT_NE(page2, bucket->activePagesHead);
921 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
922 EXPECT_EQ(1, page->numAllocatedSlots);
924 // Work out a pointer into page2 and free it; and then page1 and free it.
// NOTE(review): the partitionFree() calls that act on ptr2 are elided from
// this listing; ptr2 is recomputed below for the second page.
925 char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
927 ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
930 // If we perform two allocations from the same bucket now, we expect to
931 // refill both the nearly full pages.
932 (void) partitionAlloc(allocator.root(), kTestAllocSize);
933 (void) partitionAlloc(allocator.root(), kTestAllocSize);
// The third page should not have absorbed the two new allocations.
934 EXPECT_EQ(1, page->numAllocatedSlots);
943 // Basic tests to ensure that allocations work for partial page buckets.
// A "partial" bucket is one whose slot span does not occupy a whole number
// of partition pages (numSystemPagesPerSlotSpan not a multiple of
// kNumSystemPagesPerPartitionPage).
944 TEST(PartitionAllocTest, PartialPages)
948 // Find a size that is backed by a partial partition page.
// Walk bucket sizes in pointer-size increments until one with a partial
// slot span is found; the loop's break on a hit is elided from this listing.
949 size_t size = sizeof(void*);
950 WTF::PartitionBucket* bucket = 0;
951 while (size < kTestMaxAllocation) {
952 bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
953 if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartitionPage)
955 size += sizeof(void*);
// Guard: the search must have succeeded before hitting the max allocation.
957 EXPECT_LT(size, kTestMaxAllocation);
// Fill two pages at the found size to exercise the partial-page bucket.
959 WTF::PartitionPage* page1 = GetFullPage(size);
960 WTF::PartitionPage* page2 = GetFullPage(size);
967 // Test correct handling if our mapping collides with another.
// Deliberately places foreign mappings directly adjacent to a super page so
// that the allocator's next super-page reservation cannot land at its
// preferred (adjacent, aligned) address, and verifies alignment still holds.
968 TEST(PartitionAllocTest, MappingCollision)
971 // The -2 is because the first and last partition pages in a super page are
// guard pages (comment continuation elided in this listing).
973 size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
974 OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
975 OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
// Fill the first super page completely with full partition pages.
978 for (i = 0; i < numPartitionPagesNeeded; ++i)
979 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
981 char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
// The first usable partition page sits one partition page into the super
// page (slot 0 is a guard page).
982 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
983 pageBase -= WTF::kPartitionPageSize;
984 // Map a single system page either side of the mapping for our allocations,
985 // with the goal of tripping up alignment of the next mapping.
986 void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
988 void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
// Make the blocking mappings inaccessible so any stray touch faults loudly.
990 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
991 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
// Force a second super page to be reserved while the neighbours are blocked.
993 for (i = 0; i < numPartitionPagesNeeded; ++i)
994 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
996 WTF::freePages(map1, WTF::kPageAllocationGranularity);
997 WTF::freePages(map2, WTF::kPageAllocationGranularity);
999 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
1000 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
1001 pageBase -= WTF::kPartitionPageSize;
1002 // Map a single system page either side of the mapping for our allocations,
1003 // with the goal of tripping up alignment of the next mapping.
1004 map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
1006 map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
1008 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
1009 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
// A third super page must be claimed despite both collisions; it must still
// be partition-page aligned.
1011 WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
1012 WTF::freePages(map1, WTF::kPageAllocationGranularity);
1013 WTF::freePages(map2, WTF::kPageAllocationGranularity);
1015 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);
1017 // And make sure we really did get a page in a new superpage.
1018 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
1019 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
// Clean up every page we filled so later tests start from a known state.
1021 FreeFullPage(pageInThirdSuperPage);
1022 for (i = 0; i < numPartitionPagesNeeded; ++i) {
1023 FreeFullPage(firstSuperPagePages[i]);
1024 FreeFullPage(secondSuperPagePages[i]);
1030 // Tests that pages in the free page cache do get freed as appropriate.
// An emptied page should first be parked in the free cache (freeCacheIndex
// set, freelist retained) and only decommitted once the cache is cycled.
1031 TEST(PartitionAllocTest, FreeCache)
// Baseline: nothing committed before the first allocation.
1035 EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);
1037 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
1038 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
1039 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
1041 void* ptr = partitionAlloc(allocator.root(), bigSize);
1043 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
1044 EXPECT_EQ(0, bucket->freePagesHead);
1045 EXPECT_EQ(1, page->numAllocatedSlots);
1046 EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
// After the free (elided in this listing) the page is empty but cached:
// still has a freelist and a valid free-cache slot index.
1048 EXPECT_EQ(0, page->numAllocatedSlots);
1049 EXPECT_NE(-1, page->freeCacheIndex);
1050 EXPECT_TRUE(page->freelistHead);
// Churn the free cache enough to evict our page from it.
1052 CycleFreeCache(kTestAllocSize);
1054 // Flushing the cache should have really freed the unused page.
1055 EXPECT_FALSE(page->freelistHead);
1056 EXPECT_EQ(-1, page->freeCacheIndex);
1057 EXPECT_EQ(0, page->numAllocatedSlots);
1058 WTF::PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
// Only the cache-cycling bucket's own span should remain committed.
1059 EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);
1061 // Check that an allocation works ok whilst in this state (a free'd page
1062 // as the active pages head).
1063 ptr = partitionAlloc(allocator.root(), bigSize);
1064 EXPECT_FALSE(bucket->freePagesHead);
1067 // Also check that a page that is bouncing immediately between empty and
1068 // used does not get freed.
// (Matching frees inside the loop are elided from this listing.)
1069 for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) {
1070 ptr = partitionAlloc(allocator.root(), bigSize);
1071 EXPECT_TRUE(page->freelistHead);
1073 EXPECT_TRUE(page->freelistHead);
1075 EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
1079 // Tests for a bug we had with losing references to free pages.
// Regression test: sets up an active-pages list whose head is a freed page
// while the free-pages list is non-empty, then allocates across that state.
1080 TEST(PartitionAllocTest, LostFreePagesBug)
// One partition page per slot at this size, so each alloc owns a page.
1084 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
1086 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
1088 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
1091 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
1092 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr2));
1093 WTF::PartitionBucket* bucket = page->bucket;
1095 EXPECT_EQ(0, bucket->freePagesHead);
// NOTE(review): -1 here appears to be the "full page" sentinel for
// numAllocatedSlots — confirm against PartitionAlloc.h.
1096 EXPECT_EQ(-1, page->numAllocatedSlots);
1097 EXPECT_EQ(1, page2->numAllocatedSlots);
1099 partitionFreeGeneric(genericAllocator.root(), ptr);
1100 partitionFreeGeneric(genericAllocator.root(), ptr2);
// Both pages are now empty but still hold their freelists (cached).
1102 EXPECT_EQ(0, bucket->freePagesHead);
1103 EXPECT_EQ(0, page->numAllocatedSlots);
1104 EXPECT_EQ(0, page2->numAllocatedSlots);
1105 EXPECT_TRUE(page->freelistHead);
1106 EXPECT_TRUE(page2->freelistHead);
// Cycling the generic free cache decommits both pages' freelists.
1108 CycleGenericFreeCache(kTestAllocSize);
1110 EXPECT_FALSE(page->freelistHead);
1111 EXPECT_FALSE(page2->freelistHead);
1113 EXPECT_FALSE(bucket->freePagesHead);
1114 EXPECT_TRUE(bucket->activePagesHead);
1115 EXPECT_TRUE(bucket->activePagesHead->nextPage);
1117 // At this moment, we have two freed pages, on the freelist.
1119 ptr = partitionAllocGeneric(genericAllocator.root(), size);
1121 partitionFreeGeneric(genericAllocator.root(), ptr);
1123 EXPECT_TRUE(bucket->activePagesHead);
1124 EXPECT_TRUE(bucket->freePagesHead);
1126 CycleGenericFreeCache(kTestAllocSize);
1128 // We're now set up to trigger the bug by scanning over the active pages
1129 // list, where the current active page is freed, and there exists at least
1130 // one freed page in the free pages list.
1131 ptr = partitionAllocGeneric(genericAllocator.root(), size);
1133 partitionFreeGeneric(genericAllocator.root(), ptr);
// With the fix, neither list loses track of its pages.
1135 EXPECT_TRUE(bucket->activePagesHead);
1136 EXPECT_TRUE(bucket->freePagesHead);
1141 #if !CPU(64BIT) || OS(POSIX)
1143 // Tests that if an allocation fails in "return null" mode, repeating it doesn't
1144 // crash, and still returns null. The test tries to allocate 6 GB of memory in
1145 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB
1146 // using setrlimit() first.
1147 TEST(PartitionAllocTest, RepeatedReturnNull)
// Cap the address space so the allocations below can actually fail.
1151 EXPECT_TRUE(SetAddressSpaceLimit());
1153 // 512 kB x 12288 == 6 GB
1154 const size_t blockSize = 512 * 1024;
1155 const int numAllocations = 12288;
1157 void* ptrs[numAllocations];
// Allocate until the first failure; i is declared in an elided line above.
1160 for (i = 0; i < numAllocations; ++i) {
1161 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, blockSize);
// On failure, retry once: PartitionAllocReturnNull must keep returning null
// rather than crashing.
1163 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, blockSize);
1164 EXPECT_FALSE(ptrs[i]);
1169 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
1170 // we're not actually testing anything here.
1171 EXPECT_LT(i, numAllocations);
1173 // Free, reallocate and free again each block we allocated. We do this to
1174 // check that freeing memory also works correctly after a failed allocation.
1175 for (--i; i >= 0; --i) {
1176 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
// With one block just released, the same-sized re-allocation must succeed.
1177 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, blockSize);
1178 EXPECT_TRUE(ptrs[i]);
1179 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
// Restore the rlimit so subsequent tests see a normal address space.
1182 EXPECT_TRUE(ClearAddressSpaceLimit());
1187 #endif // !CPU(64BIT) || OS(POSIX)
1191 // Make sure that malloc(-1) dies.
1192 // In the past, we had an integer overflow that would alias malloc(-1) to
1193 // malloc(0), which is not good.
1194 TEST(PartitionAllocDeathTest, LargeAllocs)
// Largest possible size_t must trap rather than wrap around.
1198 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
1199 // And the smallest allocation we expect to die.
1200 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");
1205 // Check that our immediate double-free detection works.
// Freeing the same pointer twice in a row must crash (the second free sees
// the slot already at the freelist head).
1206 TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
1210 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1212 partitionFreeGeneric(genericAllocator.root(), ptr);
1214 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1219 // Check that our refcount-based double-free detection works.
// An interleaved free (ptr, ptr2, ptr again) defeats the freelist-head
// check, so the page's slot count underflow must trap instead.
1220 TEST(PartitionAllocDeathTest, RefcountDoubleFree)
1224 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1226 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1228 partitionFreeGeneric(genericAllocator.root(), ptr);
1229 partitionFreeGeneric(genericAllocator.root(), ptr2);
1230 // This is not an immediate double-free so our immediate detection won't
1231 // fire. However, it does take the "refcount" of the partition page to -1,
1232 // which is illegal and should be trapped.
1233 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1238 // Check that guard pages are present where expected.
// Writing one byte before or one byte past a direct-mapped allocation must
// fault on the surrounding guard pages.
1239 TEST(PartitionAllocDeathTest, GuardPages)
1243 // This large size will result in a direct mapped allocation with guard
1244 // pages at either end.
1245 size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllocSize;
1246 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
// Back up over the cookie/offset to the true start of the mapping's slot.
1248 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1250 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1251 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1253 partitionFreeGeneric(genericAllocator.root(), ptr);
1258 #endif // !OS(ANDROID)
1260 // Tests that the countLeadingZeros() functions work to our satisfaction.
1261 // It doesn't seem worth the overhead of a whole new file for these tests, so
1262 // we'll put them here since partitionAllocGeneric will depend heavily on these
1263 // functions working correctly.
1264 TEST(PartitionAllocTest, CLZWorks)
// 32-bit variant: input 0 yields the full bit width by convention here.
1266 EXPECT_EQ(32u, WTF::countLeadingZeros32(0u));
1267 EXPECT_EQ(31u, WTF::countLeadingZeros32(1u));
1268 EXPECT_EQ(1u, WTF::countLeadingZeros32(1u << 30));
1269 EXPECT_EQ(0u, WTF::countLeadingZeros32(1u << 31));
// size_t variant, 64-bit expectations. NOTE(review): the #if CPU(64BIT)/#else
// split around these two groups is elided from this listing — the 64-bit and
// 32-bit expectation groups cannot both hold on one platform.
1272 EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
1273 EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
1274 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
1275 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
1276 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
// size_t variant, 32-bit expectations.
1278 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0u));
1279 EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1u));
1280 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1u << 30));
1281 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1u << 31));
1287 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)