2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "wtf/PartitionAlloc.h"
34 #include "wtf/BitwiseOperations.h"
35 #include "wtf/OwnPtr.h"
36 #include "wtf/PassOwnPtr.h"
37 #include <gtest/gtest.h>
45 #define MAP_ANONYMOUS MAP_ANON
// Test fixture state: a size-specific allocator (capped at 4 KB) and a
// generic allocator, shared by all tests in this file.
// NOTE(review): the embedded source line numbers skip values, so this
// listing omits interleaving lines (e.g. the #if/#else that selects
// between the two kPointerOffset/kExtraAllocSize definitions below).
49 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Largest allocation the size-specific allocator under test supports.
53 static const size_t kTestMaxAllocation = 4096;
54 static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
55 static PartitionAllocatorGeneric genericAllocator;
// Canonical small allocation size used throughout these tests.
57 static const size_t kTestAllocSize = 16;
// Cookie-free configuration: user pointer coincides with the slot start
// and no extra bytes are added per allocation.
59 static const size_t kPointerOffset = 0;
60 static const size_t kExtraAllocSize = 0;
// Cookie-checking configuration: each allocation is book-ended by a
// cookie, shifting the user pointer and growing the real slot size.
62 static const size_t kPointerOffset = WTF::kCookieSize;
63 static const size_t kExtraAllocSize = WTF::kCookieSize * 2;
// Actual per-slot size and the bucket index kTestAllocSize maps to.
65 static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
66 static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
// Per-test initialization of the partition roots.
// NOTE(review): this listing appears to omit lines here (presumably
// allocator.init() and braces) — kept byte-identical.
68 static void TestSetup()
71 genericAllocator.init();
// Per-test teardown. shutdown() returns true only if every allocation
// was freed, so these EXPECTs double as leak checks.
74 static void TestShutdown()
76 // We expect no leaks in the general case. We have a test for leak
78 EXPECT_TRUE(allocator.shutdown());
79 EXPECT_TRUE(genericAllocator.shutdown());
// Allocates enough |size|-byte slots to completely fill one slot span in
// the size-specific allocator, then returns the (now full) active page.
// NOTE(review): declarations of i/first/last and some braces are elided
// from this listing (embedded line numbers skip).
82 static WTF::PartitionPage* GetFullPage(size_t size)
84 size_t realSize = size + kExtraAllocSize;
85 size_t bucketIdx = realSize >> WTF::kBucketShift;
86 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
// How many slots one slot span holds for this bucket.
87 size_t numSlots = (bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / realSize;
91 for (i = 0; i < numSlots; ++i) {
92 void* ptr = partitionAlloc(allocator.root(), size);
// Record cookie-adjusted first and last slot pointers so we can assert
// below that they landed in the same partition page.
95 first = WTF::partitionCookieFreePointerAdjust(ptr);
96 else if (i == numSlots - 1)
97 last = WTF::partitionCookieFreePointerAdjust(ptr);
99 EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last));
100 if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage)
101 EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
// Full page invariants: every slot allocated, freelist exhausted, and
// the active page is a real page (not the shared seed page).
102 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
103 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
104 EXPECT_TRUE(bucket->activePagesHead);
105 EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage);
106 return bucket->activePagesHead;
// Frees every slot of a page previously filled by GetFullPage().
// abs() is used because numAllocatedSlots is negated for pages that are
// no longer the active page — TODO confirm against PartitionAlloc docs.
109 static void FreeFullPage(WTF::PartitionPage* page)
111 size_t size = page->bucket->slotSize;
112 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size;
113 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
114 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
116 for (i = 0; i < numSlots; ++i) {
// Free each slot; kPointerOffset skips the leading cookie (if any).
117 partitionFree(ptr + kPointerOffset);
// Repeatedly allocates and frees a single |size| slot kMaxFreeableSpans
// times so the page cycles through the free-page cache (size-specific
// allocator). Each iteration checks the page ends up empty and cached.
122 static void CycleFreeCache(size_t size)
124 size_t realSize = size + kExtraAllocSize;
125 size_t bucketIdx = realSize >> WTF::kBucketShift;
126 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
127 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
129 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
130 void* ptr = partitionAlloc(allocator.root(), size);
131 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
// After the free (elided in this listing) the page is empty and has a
// valid free-cache index (-1 would mean "not cached").
133 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
134 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
// Same as CycleFreeCache() but against the generic allocator, which
// requires mapping the returned pointer back to its page/bucket.
138 static void CycleGenericFreeCache(size_t size)
140 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
141 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
142 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
143 WTF::PartitionBucket* bucket = page->bucket;
144 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
145 partitionFreeGeneric(genericAllocator.root(), ptr);
// Page must be empty again and registered in the free-page cache.
146 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
147 EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
151 // Check that the most basic of allocate / free pairs work.
152 TEST(WTF_PartitionAlloc, Basic)
155 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
156 WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage;
// A fresh bucket points at the shared seed page and has no free pages.
158 EXPECT_FALSE(bucket->freePagesHead);
159 EXPECT_EQ(seedPage, bucket->activePagesHead);
160 EXPECT_EQ(0, bucket->activePagesHead->nextPage);
162 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
// The returned pointer sits kPointerOffset bytes into a partition page.
164 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
165 // Check that the offset appears to include a guard page.
166 EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);
169 // Expect that the last active page does not get tossed to the freelist.
170 EXPECT_FALSE(bucket->freePagesHead);
175 // Check that we can detect a memory leak.
176 TEST(WTF_PartitionAlloc, SimpleLeak)
// Deliberately leak one allocation from each allocator; shutdown() must
// then report failure (false) for both.
179 void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
181 void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
183 EXPECT_FALSE(allocator.shutdown());
184 EXPECT_FALSE(genericAllocator.shutdown());
187 // Test multiple allocations, and freelist handling.
188 TEST(WTF_PartitionAlloc, MultiAlloc)
192 char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
193 char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
// Consecutive allocations from the same bucket are exactly one real
// slot apart.
196 ptrdiff_t diff = ptr2 - ptr1;
197 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
199 // Check that we re-use the just-freed slot.
201 ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
204 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
206 ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
209 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
// A third allocation lands two slots past ptr1.
211 char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
214 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
223 // Test a bucket with multiple pages.
224 TEST(WTF_PartitionAlloc, MultiPages)
227 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
229 WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
// After filling and (elided) freeing, the single page stays active.
231 EXPECT_FALSE(bucket->freePagesHead);
232 EXPECT_EQ(page, bucket->activePagesHead);
233 EXPECT_EQ(0, page->nextPage);
234 EXPECT_EQ(0, page->numAllocatedSlots);
236 page = GetFullPage(kTestAllocSize);
237 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
239 EXPECT_EQ(page2, bucket->activePagesHead);
240 EXPECT_EQ(0, page2->nextPage);
// Both pages must live in the same super page.
241 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);
243 // Fully free the non-current page. It should not be freelisted because
244 // there is no other immediately useable page. The other page is full.
246 EXPECT_EQ(0, page->numAllocatedSlots);
247 EXPECT_FALSE(bucket->freePagesHead);
248 EXPECT_EQ(page, bucket->activePagesHead);
250 // Allocate a new page, it should pull from the freelist.
251 page = GetFullPage(kTestAllocSize);
252 EXPECT_FALSE(bucket->freePagesHead);
253 EXPECT_EQ(page, bucket->activePagesHead);
// Final state: both pages fully freed, page2 parked in the free cache.
257 EXPECT_EQ(0, page->numAllocatedSlots);
258 EXPECT_EQ(0, page2->numAllocatedSlots);
259 EXPECT_EQ(0, page2->numUnprovisionedSlots);
260 EXPECT_NE(-1, page2->freeCacheIndex);
265 // Test some finer aspects of internal page transitions.
266 TEST(WTF_PartitionAlloc, PageTransitions)
269 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
271 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
272 EXPECT_EQ(page1, bucket->activePagesHead);
273 EXPECT_EQ(0, page1->nextPage);
274 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
275 EXPECT_EQ(page2, bucket->activePagesHead);
276 EXPECT_EQ(0, page2->nextPage);
278 // Bounce page1 back into the non-full list then fill it up again.
279 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
281 EXPECT_EQ(page1, bucket->activePagesHead);
282 (void) partitionAlloc(allocator.root(), kTestAllocSize);
283 EXPECT_EQ(page1, bucket->activePagesHead);
284 EXPECT_EQ(page2, bucket->activePagesHead->nextPage);
286 // Allocating another page at this point should cause us to scan over page1
287 // (which is both full and NOT our current page), and evict it from the
288 // freelist. Older code had a O(n^2) condition due to failure to do this.
289 WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
290 EXPECT_EQ(page3, bucket->activePagesHead);
291 EXPECT_EQ(0, page3->nextPage);
293 // Work out a pointer into page2 and free it.
294 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
296 // Trying to allocate at this time should cause us to cycle around to page2
297 // and find the recently freed slot.
298 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
299 EXPECT_EQ(ptr, newPtr);
300 EXPECT_EQ(page2, bucket->activePagesHead);
301 EXPECT_EQ(page3, page2->nextPage);
303 // Work out a pointer into page1 and free it. This should pull the page
304 // back into the list of available pages.
305 ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
307 // This allocation should be satisfied by page1.
308 newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
309 EXPECT_EQ(ptr, newPtr);
310 EXPECT_EQ(page1, bucket->activePagesHead);
311 EXPECT_EQ(page2, page1->nextPage);
317 // Allocating whilst in this state exposed a bug, so keep the test.
318 ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
324 // Test some corner cases relating to page transitions in the internal
325 // free page list metadata bucket.
326 TEST(WTF_PartitionAlloc, FreePageListPageTransitions)
329 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
// Number of pages needed so their free-page metadata fills a whole
// partition page of its own.
331 size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
332 // The +1 is because we need to account for the fact that the current page
333 // never gets thrown on the freelist.
334 ++numToFillFreeListPage;
335 OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);
338 for (i = 0; i < numToFillFreeListPage; ++i) {
339 pages[i] = GetFullPage(kTestAllocSize);
341 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
342 for (i = 0; i < numToFillFreeListPage; ++i)
343 FreeFullPage(pages[i]);
// After freeing everything, the head is empty and the next page sits in
// the free cache with no allocated or unprovisioned slots.
344 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
345 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
346 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
347 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
349 // Allocate / free in a different bucket size so we get control of a
350 // different free page list. We need two pages because one will be the last
351 // active page and not get freed.
352 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
353 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
357 // If we re-allocate all kTestAllocSize allocations, we'll pull all the
358 // free pages and end up freeing the first page for free page objects.
359 // It's getting a bit tricky but a nice re-entrancy is going on:
360 // alloc(kTestAllocSize) -> pulls page from free page list ->
361 // free(PartitionFreepagelistEntry) -> last entry in page freed ->
362 // alloc(PartitionFreepagelistEntry).
363 for (i = 0; i < numToFillFreeListPage; ++i) {
364 pages[i] = GetFullPage(kTestAllocSize);
366 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
368 // As part of the final free-up, we'll test another re-entrancy:
369 // free(kTestAllocSize) -> last entry in page freed ->
370 // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
371 // free(PartitionFreepagelistEntry)
372 for (i = 0; i < numToFillFreeListPage; ++i)
373 FreeFullPage(pages[i]);
374 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
375 EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
376 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
377 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
382 // Test a large series of allocations that cross more than one underlying
383 // 64KB super page allocation.
384 TEST(WTF_PartitionAlloc, MultiPageAllocs)
387 // This is guaranteed to cross a super page boundary because the first
388 // partition page "slot" will be taken up by a guard page.
389 size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
390 // The super page should begin and end in a guard so we one less page in
391 // order to allocate a single page in the new super page.
394 EXPECT_GT(numPagesNeeded, 1u);
395 OwnPtr<WTF::PartitionPage*[]> pages;
396 pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
397 uintptr_t firstSuperPageBase = 0;
399 for (i = 0; i < numPagesNeeded; ++i) {
400 pages[i] = GetFullPage(kTestAllocSize);
401 void* storagePtr = partitionPageToPointer(pages[i]);
// Capture the base of the first super page (condition line elided in
// this listing), then verify the last page lands in a different one.
403 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
404 if (i == numPagesNeeded - 1) {
405 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
406 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
407 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
408 // Check that we allocated a guard page for the second page.
409 EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
412 for (i = 0; i < numPagesNeeded; ++i)
413 FreeFullPage(pages[i]);
418 // Test the generic allocation functions that can handle arbitrary sizes and
420 TEST(WTF_PartitionAlloc, GenericAlloc)
// Smoke-test the extremes: a 1-byte bucketed allocation and a
// larger-than-bucketed (direct mapped) allocation.
424 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
426 partitionFreeGeneric(genericAllocator.root(), ptr);
427 ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1);
429 partitionFreeGeneric(genericAllocator.root(), ptr);
431 ptr = partitionAllocGeneric(genericAllocator.root(), 1);
434 char* charPtr = static_cast<char*>(ptr);
437 // Change the size of the realloc, remaining inside the same bucket.
438 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
439 EXPECT_EQ(ptr, newPtr);
440 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
441 EXPECT_EQ(ptr, newPtr);
442 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket);
443 EXPECT_EQ(ptr, newPtr);
445 // Change the size of the realloc, switching buckets.
446 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket + 1);
447 EXPECT_NE(newPtr, ptr);
448 // Check that the realloc copied correctly.
449 char* newCharPtr = static_cast<char*>(newPtr);
450 EXPECT_EQ(*newCharPtr, 'A');
452 // Subtle: this checks for an old bug where we copied too much from the
453 // source of the realloc. The condition can be detected by a trashing of
454 // the uninitialized value in the space of the upsized allocation.
455 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket)));
458 // The realloc moved. To check that the old allocation was freed, we can
459 // do an alloc of the old allocation size and check that the old allocation
460 // address is at the head of the freelist and reused.
461 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
462 EXPECT_EQ(reusedPtr, origPtr);
463 partitionFreeGeneric(genericAllocator.root(), reusedPtr);
465 // Downsize the realloc.
467 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
468 EXPECT_EQ(newPtr, origPtr);
469 newCharPtr = static_cast<char*>(newPtr);
470 EXPECT_EQ(*newCharPtr, 'B');
473 // Upsize the realloc to outside the partition.
475 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 1);
476 EXPECT_NE(newPtr, ptr);
477 newCharPtr = static_cast<char*>(newPtr);
478 EXPECT_EQ(*newCharPtr, 'C');
481 // Upsize and downsize the realloc, remaining outside the partition.
483 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 10);
484 newCharPtr = static_cast<char*>(newPtr);
485 EXPECT_EQ(*newCharPtr, 'D');
488 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 2);
489 newCharPtr = static_cast<char*>(newPtr);
490 EXPECT_EQ(*newCharPtr, 'E');
493 // Downsize the realloc to inside the partition.
495 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
496 EXPECT_NE(newPtr, ptr);
497 EXPECT_EQ(newPtr, origPtr);
498 newCharPtr = static_cast<char*>(newPtr);
499 EXPECT_EQ(*newCharPtr, 'F');
501 partitionFreeGeneric(genericAllocator.root(), newPtr);
505 // Test the generic allocation functions can handle some specific sizes of
507 TEST(WTF_PartitionAlloc, GenericAllocSizes)
// Zero-byte allocation must still return a usable, freeable pointer.
511 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
513 partitionFreeGeneric(genericAllocator.root(), ptr);
515 // kPartitionPageSize is interesting because it results in just one
516 // allocation per page, which tripped up some corner cases.
517 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
518 ptr = partitionAllocGeneric(genericAllocator.root(), size);
520 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
522 partitionFreeGeneric(genericAllocator.root(), ptr);
523 // Should be freeable at this point.
524 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
525 EXPECT_NE(-1, page->freeCacheIndex);
526 partitionFreeGeneric(genericAllocator.root(), ptr2);
// A size chosen so two allocations fit per slot span, minus a system
// page to exercise rounding.
528 size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WTF::kSystemPageSize) / 2) - kExtraAllocSize;
529 ptr = partitionAllocGeneric(genericAllocator.root(), size);
531 memset(ptr, 'A', size);
532 ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
534 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
536 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
539 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
540 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr3));
541 EXPECT_NE(page, page2);
543 partitionFreeGeneric(genericAllocator.root(), ptr);
544 partitionFreeGeneric(genericAllocator.root(), ptr3);
545 partitionFreeGeneric(genericAllocator.root(), ptr2);
546 // Should be freeable at this point.
547 EXPECT_NE(-1, page->freeCacheIndex);
548 EXPECT_EQ(0, page->numAllocatedSlots);
549 EXPECT_EQ(0, page->numUnprovisionedSlots);
// Re-allocation should reuse the just-freed slots in LIFO order.
550 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
551 EXPECT_EQ(ptr3, newPtr);
552 newPtr = partitionAllocGeneric(genericAllocator.root(), size);
553 EXPECT_EQ(ptr2, newPtr);
554 #if OS(LINUX) && defined(NDEBUG)
555 // On Linux, we have a guarantee that freelisting a page should cause its
556 // contents to be nulled out. We check for null here to detect an bug we
557 // had where a large slot size was causing us to not properly free all
558 // resources back to the system.
559 // We only run the check in optimized builds because the debug build
560 // writes over the allocated area with an "uninitialized" byte pattern.
561 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
563 partitionFreeGeneric(genericAllocator.root(), newPtr);
564 partitionFreeGeneric(genericAllocator.root(), ptr3);
565 partitionFreeGeneric(genericAllocator.root(), ptr4);
567 // Can we allocate a massive (512MB) size?
568 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
569 partitionFreeGeneric(genericAllocator.root(), ptr);
571 // Check a more reasonable, but still direct mapped, size.
572 // Chop a system page and a byte off to test for rounding errors.
573 size = 20 * 1024 * 1024;
574 size -= WTF::kSystemPageSize;
576 ptr = partitionAllocGeneric(genericAllocator.root(), size);
577 char* charPtr = reinterpret_cast<char*>(ptr);
578 *(charPtr + (size - 1)) = 'A';
579 partitionFreeGeneric(genericAllocator.root(), ptr);
// free(0) must be a no-op, like ::free(NULL).
582 partitionFreeGeneric(genericAllocator.root(), 0);
584 // Do we correctly get a null for a failed allocation?
585 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));
590 // Test the realloc() contract.
591 TEST(WTF_PartitionAlloc, Realloc)
595 // realloc(0, size) should be equivalent to malloc().
596 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
597 memset(ptr, 'A', kTestAllocSize);
598 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
599 // realloc(ptr, 0) should be equivalent to free().
600 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
// The freed slot must now be at the head of the page's freelist.
602 EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead);
607 // Tests the handing out of freelists for partial pages.
608 TEST(WTF_PartitionAlloc, PartialPageFreelists)
// A "big" size: the largest the size-specific allocator supports.
612 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
613 EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
614 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
615 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
616 EXPECT_EQ(0, bucket->freePagesHead);
618 void* ptr = partitionAlloc(allocator.root(), bigSize);
621 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
622 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (bigSize + kExtraAllocSize);
623 EXPECT_EQ(4u, totalSlots);
624 // The freelist should have one entry, because we were able to exactly fit
625 // one object slot and one freelist pointer (the null that the head points
626 // to) into a system page.
627 EXPECT_TRUE(page->freelistHead);
628 EXPECT_EQ(1, page->numAllocatedSlots);
629 EXPECT_EQ(2, page->numUnprovisionedSlots);
631 void* ptr2 = partitionAlloc(allocator.root(), bigSize);
633 EXPECT_FALSE(page->freelistHead);
634 EXPECT_EQ(2, page->numAllocatedSlots);
635 EXPECT_EQ(2, page->numUnprovisionedSlots);
637 void* ptr3 = partitionAlloc(allocator.root(), bigSize);
639 EXPECT_TRUE(page->freelistHead);
640 EXPECT_EQ(3, page->numAllocatedSlots);
641 EXPECT_EQ(0, page->numUnprovisionedSlots);
643 void* ptr4 = partitionAlloc(allocator.root(), bigSize);
645 EXPECT_FALSE(page->freelistHead);
646 EXPECT_EQ(4, page->numAllocatedSlots);
647 EXPECT_EQ(0, page->numUnprovisionedSlots);
// The fifth allocation overflows to a fresh page.
649 void* ptr5 = partitionAlloc(allocator.root(), bigSize);
652 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
653 EXPECT_EQ(1, page2->numAllocatedSlots);
655 // Churn things a little whilst there's a partial page freelist.
657 ptr = partitionAlloc(allocator.root(), bigSize);
658 void* ptr6 = partitionAlloc(allocator.root(), bigSize);
// After the (elided) frees, both pages are empty and cached.
666 EXPECT_NE(-1, page->freeCacheIndex);
667 EXPECT_NE(-1, page2->freeCacheIndex);
668 EXPECT_TRUE(page2->freelistHead);
669 EXPECT_EQ(0, page2->numAllocatedSlots);
671 // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
672 size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize;
673 bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
674 bucket = &allocator.root()->buckets()[bucketIdx];
675 EXPECT_EQ(0, bucket->freePagesHead);
677 ptr = partitionAlloc(allocator.root(), mediumSize);
679 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
680 EXPECT_EQ(1, page->numAllocatedSlots);
681 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (mediumSize + kExtraAllocSize);
682 size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
683 EXPECT_EQ(2u, firstPageSlots);
684 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
// Same checks with a quarter-page slot size.
688 size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize;
689 bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
690 bucket = &allocator.root()->buckets()[bucketIdx];
691 EXPECT_EQ(0, bucket->freePagesHead);
693 ptr = partitionAlloc(allocator.root(), smallSize);
695 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
696 EXPECT_EQ(1, page->numAllocatedSlots);
697 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (smallSize + kExtraAllocSize);
698 firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
699 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
702 EXPECT_TRUE(page->freelistHead);
703 EXPECT_EQ(0, page->numAllocatedSlots);
// A tiny fixed size (32 bytes including cookies).
705 size_t verySmallSize = 32 - kExtraAllocSize;
706 bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
707 bucket = &allocator.root()->buckets()[bucketIdx];
708 EXPECT_EQ(0, bucket->freePagesHead);
710 ptr = partitionAlloc(allocator.root(), verySmallSize);
712 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
713 EXPECT_EQ(1, page->numAllocatedSlots);
714 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (verySmallSize + kExtraAllocSize);
715 firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
716 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
719 EXPECT_TRUE(page->freelistHead);
720 EXPECT_EQ(0, page->numAllocatedSlots);
722 // And try an allocation size (against the generic allocator) that is
723 // larger than a system page.
724 size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2)) - kExtraAllocSize;
725 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
727 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
728 EXPECT_EQ(1, page->numAllocatedSlots);
729 EXPECT_TRUE(page->freelistHead);
730 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
731 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
732 partitionFreeGeneric(genericAllocator.root(), ptr);
734 // And then make sure than exactly the page size only faults one page.
735 size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize;
736 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
738 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
739 EXPECT_EQ(1, page->numAllocatedSlots);
740 EXPECT_FALSE(page->freelistHead);
741 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageSize + kExtraAllocSize);
742 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
743 partitionFreeGeneric(genericAllocator.root(), ptr);
748 // Test some of the fragmentation-resistant properties of the allocator.
749 TEST(WTF_PartitionAlloc, PageRefilling)
752 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
754 // Grab two full pages and a non-full page.
755 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
756 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
757 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
759 EXPECT_NE(page1, bucket->activePagesHead);
760 EXPECT_NE(page2, bucket->activePagesHead);
761 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
762 EXPECT_EQ(1, page->numAllocatedSlots);
764 // Work out a pointer into page2 and free it; and then page1 and free it.
765 char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
767 ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
770 // If we perform two allocations from the same bucket now, we expect to
771 // refill both the nearly full pages.
772 (void) partitionAlloc(allocator.root(), kTestAllocSize);
773 (void) partitionAlloc(allocator.root(), kTestAllocSize);
// The non-full page must not have absorbed either refill allocation.
774 EXPECT_EQ(1, page->numAllocatedSlots);
783 // Basic tests to ensure that allocations work for partial page buckets.
784 TEST(WTF_PartitionAlloc, PartialPages)
788 // Find a size that is backed by a partial partition page.
789 size_t size = sizeof(void*);
790 WTF::PartitionBucket* bucket = 0;
791 while (size < kTestMaxAllocation) {
792 bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
// A slot span whose system-page count is not a multiple of a partition
// page means this bucket is backed by a partial page.
793 if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartitionPage)
795 size += sizeof(void*);
797 EXPECT_LT(size, kTestMaxAllocation);
// Fill (and, per the elided lines, free) two pages of that size.
799 WTF::PartitionPage* page1 = GetFullPage(size);
800 WTF::PartitionPage* page2 = GetFullPage(size);
807 // Test correct handling if our mapping collides with another.
808 TEST(WTF_PartitionAlloc, MappingCollision)
811 // The -2 is because the first and last partition pages in a super page are
813 size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
814 OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
815 OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
// Exhaust the first super page.
818 for (i = 0; i < numPartitionPagesNeeded; ++i)
819 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
821 char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
822 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
823 pageBase -= WTF::kPartitionPageSize;
824 // Map a single system page either side of the mapping for our allocations,
825 // with the goal of tripping up alignment of the next mapping.
826 void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
828 void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
830 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
831 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
// Force allocation of a second super page despite the blockers.
833 for (i = 0; i < numPartitionPagesNeeded; ++i)
834 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
836 WTF::freePages(map1, WTF::kPageAllocationGranularity);
837 WTF::freePages(map2, WTF::kPageAllocationGranularity);
839 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
840 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
841 pageBase -= WTF::kPartitionPageSize;
842 // Map a single system page either side of the mapping for our allocations,
843 // with the goal of tripping up alignment of the next mapping.
844 map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
846 map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
848 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
849 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);
851 WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
852 WTF::freePages(map1, WTF::kPageAllocationGranularity);
853 WTF::freePages(map2, WTF::kPageAllocationGranularity);
// The third super page's first usable page is partition-page aligned.
855 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);
857 // And make sure we really did get a page in a new superpage.
858 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
859 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
861 FreeFullPage(pageInThirdSuperPage);
862 for (i = 0; i < numPartitionPagesNeeded; ++i) {
863 FreeFullPage(firstSuperPagePages[i]);
864 FreeFullPage(secondSuperPagePages[i]);
870 // Tests that pages in the free page cache do get freed as appropriate.
871 TEST(WTF_PartitionAlloc, FreeCache)
// Lifecycle exercised here: allocate -> page active; free -> page empty but
// retained in the free-page cache; cycle the cache -> page really freed.
// NOTE(review): some original lines (braces, TestSetup(), the partitionFree()
// calls on original lines ~880/884/888, TestShutdown()) are elided from this
// excerpt; embedded original line numbers are preserved.
// bigSize is the largest size this size-specific allocator serves, so a
// single allocation occupies a page by itself (1 slot).
875 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
876 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
877 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
879 void* ptr = partitionAlloc(allocator.root(), bigSize);
881 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
882 EXPECT_EQ(0, bucket->freePagesHead);
883 EXPECT_EQ(1, page->numAllocatedSlots);
// After the (elided) free: zero slots in use, but the page is parked in the
// free-page cache (freeCacheIndex set) and keeps its freelist — not yet freed.
885 EXPECT_EQ(0, page->numAllocatedSlots);
886 EXPECT_NE(-1, page->freeCacheIndex);
887 EXPECT_TRUE(page->freelistHead);
// Churn enough allocations through the cache to force eviction of our page.
889 CycleFreeCache(kTestAllocSize);
891 // Flushing the cache should have really freed the unused page.
892 EXPECT_FALSE(page->freelistHead);
893 EXPECT_EQ(-1, page->freeCacheIndex);
894 EXPECT_EQ(0, page->numAllocatedSlots);
896 // Check that an allocation works ok whilst in this state (a free'd page
897 // as the active pages head).
898 ptr = partitionAlloc(allocator.root(), bigSize);
899 EXPECT_FALSE(bucket->freePagesHead);
902 // Also check that a page that is bouncing immediately between empty and
903 // used does not get freed.
904 for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) {
905 ptr = partitionAlloc(allocator.root(), bigSize);
// The page's freelist survives every iteration (the matching free is on an
// elided line), showing the bouncing page is never actually decommitted.
906 EXPECT_TRUE(page->freelistHead);
908 EXPECT_TRUE(page->freelistHead);
914 // Tests for a bug we had with losing references to free pages.
915 TEST(WTF_PartitionAlloc, LostFreePagesBug)
// Regression test: set up a bucket whose active-pages list head is a freed
// page while another freed page sits on the free-pages list, then allocate —
// the old bug lost the reference to one of the free pages during the scan.
// NOTE(review): braces/TestSetup()/some EXPECTs are elided from this excerpt;
// embedded original line numbers are preserved.
// One partition page per allocation: each alloc gets a page to itself.
919 size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
921 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
923 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
926 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
927 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr2));
928 WTF::PartitionBucket* bucket = page->bucket;
930 EXPECT_EQ(0, bucket->freePagesHead);
// -1 here: presumably the bookkeeping marker for a full page that has been
// unlinked from the active list (1 slot, fully used) — TODO(review) confirm
// against PartitionPage::numAllocatedSlots semantics.
931 EXPECT_EQ(-1, page->numAllocatedSlots);
932 EXPECT_EQ(1, page2->numAllocatedSlots);
934 partitionFreeGeneric(genericAllocator.root(), ptr);
935 partitionFreeGeneric(genericAllocator.root(), ptr2);
// Both pages are now empty but still cached: freelists intact, not yet freed.
937 EXPECT_EQ(0, bucket->freePagesHead);
938 EXPECT_EQ(0, page->numAllocatedSlots);
939 EXPECT_EQ(0, page2->numAllocatedSlots);
940 EXPECT_TRUE(page->freelistHead);
941 EXPECT_TRUE(page2->freelistHead);
// Flush the free-page cache: both pages lose their freelists (really freed)
// but remain linked from the bucket's active list.
943 CycleGenericFreeCache(kTestAllocSize);
945 EXPECT_FALSE(page->freelistHead);
946 EXPECT_FALSE(page2->freelistHead);
948 EXPECT_FALSE(bucket->freePagesHead);
949 EXPECT_TRUE(bucket->activePagesHead);
950 EXPECT_TRUE(bucket->activePagesHead->nextPage);
952 // At this moment, we have two freed pages, on the freelist.
// Alloc/free once: moves one freed page onto the free-pages list while the
// active list still has a freed page at its head.
954 ptr = partitionAllocGeneric(genericAllocator.root(), size);
956 partitionFreeGeneric(genericAllocator.root(), ptr);
958 EXPECT_TRUE(bucket->activePagesHead);
959 EXPECT_TRUE(bucket->freePagesHead);
961 CycleGenericFreeCache(kTestAllocSize);
963 // We're now set up to trigger the bug by scanning over the active pages
964 // list, where the current active page is freed, and there exists at least
965 // one freed page in the free pages list.
966 ptr = partitionAllocGeneric(genericAllocator.root(), size);
968 partitionFreeGeneric(genericAllocator.root(), ptr);
// With the fix, neither list head reference is lost by the scan.
970 EXPECT_TRUE(bucket->activePagesHead);
971 EXPECT_TRUE(bucket->freePagesHead);
976 // Make sure that malloc(-1) dies.
977 // In the past, we had an integer overflow that would alias malloc(-1) to
978 // malloc(0), which is not good.
979 TEST(WTF_PartitionAllocDeathTest, LargeAllocs)
// Death test: the allocator must trap (not wrap around) on absurd sizes.
// Largest possible request: size_t(-1) must die rather than alias to 0.
983 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
984 // And the smallest allocation we expect to die.
// INT_MAX + 1 is the documented lower bound of the trapping range.
985 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");
990 // Check that our immediate double-free detection works.
991 TEST(WTF_PartitionAllocDeathTest, ImmediateDoubleFree)
// Free the same pointer twice in a row with no intervening allocation: the
// second free must be detected immediately and trap.
995 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
997 partitionFreeGeneric(genericAllocator.root(), ptr);
999 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1004 // Check that our refcount-based double-free detection works.
1005 TEST(WTF_PartitionAllocDeathTest, RefcountDoubleFree)
// Interleave frees so the duplicate free of 'ptr' is NOT the page's most
// recent free — the immediate (freelist-head) check cannot catch it, and the
// page's allocated-slot count must catch it instead.
1009 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1011 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
1013 partitionFreeGeneric(genericAllocator.root(), ptr);
1014 partitionFreeGeneric(genericAllocator.root(), ptr2);
1015 // This is not an immediate double-free so our immediate detection won't
1016 // fire. However, it does take the "refcount" of the partition page to -1,
1017 // which is illegal and should be trapped.
1018 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1023 // Check that guard pages are present where expected.
1024 TEST(WTF_PartitionAllocDeathTest, GuardPages)
1028 // This large size will result in a direct mapped allocation with guard
1029 // pages at either end.
1030 size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllocSize;
1031 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
// Rewind past the cookie/extras to the raw start of the usable region.
1033 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
// One byte before the mapping and one byte past its end must both land in
// inaccessible guard pages and kill the process on write.
1035 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1036 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1038 partitionFreeGeneric(genericAllocator.root(), ptr);
1043 // Tests that the countLeadingZeros() functions work to our satisfaction.
1044 // It doesn't seem worth the overhead of a whole new file for these tests, so
1045 // we'll put them here since partitionAllocGeneric will depend heavily on these
1046 // functions working correctly.
1047 TEST(WTF_PartitionAlloc, CLZWorks)
// 32-bit variant: spot-check both extremes (all-zero, lowest bit) and the
// top two bit positions.
1049 EXPECT_EQ(32u, WTF::countLeadingZeros32(0));
1050 EXPECT_EQ(31u, WTF::countLeadingZeros32(1));
1051 EXPECT_EQ(1u, WTF::countLeadingZeros32(1 << 30));
1052 EXPECT_EQ(0u, WTF::countLeadingZeros32(1 << 31));
// size_t variant, 64-bit expectations (1ull inputs up to bit 63).
// NOTE(review): the two groups below presumably sit in a 64-bit/32-bit
// preprocessor conditional elided from this excerpt — confirm against the
// full file before editing.
1055 EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
1056 EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
1057 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
1058 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
1059 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
// size_t variant, 32-bit expectations (size_t is 32 bits wide here).
1061 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0));
1062 EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1));
1063 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1 << 30));
1064 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1 << 31));
1070 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)