Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/PartitionAllocTest.cpp

Issue 1881983003: Move PartitionAlloc related things into wtf/allocator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "wtf/PartitionAlloc.h"
32
33 #include "testing/gtest/include/gtest/gtest.h"
34 #include "wtf/BitwiseOperations.h"
35 #include "wtf/CPU.h"
36 #include "wtf/OwnPtr.h"
37 #include "wtf/PassOwnPtr.h"
38 #include "wtf/Vector.h"
39 #include <stdlib.h>
40 #include <string.h>
41
42 #if OS(POSIX)
43 #include <sys/mman.h>
44 #include <sys/resource.h>
45 #include <sys/time.h>
46
47 #ifndef MAP_ANONYMOUS
48 #define MAP_ANONYMOUS MAP_ANON
49 #endif
50 #endif // OS(POSIX)
51
52 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
53
54 namespace WTF {
55
56 namespace {
57
58 const size_t kTestMaxAllocation = 4096;
59 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
60 PartitionAllocatorGeneric genericAllocator;
61
62 const size_t kTestAllocSize = 16;
63 #if !ENABLE(ASSERT)
64 const size_t kPointerOffset = 0;
65 const size_t kExtraAllocSize = 0;
66 #else
67 const size_t kPointerOffset = WTF::kCookieSize;
68 const size_t kExtraAllocSize = WTF::kCookieSize * 2;
69 #endif
70 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
71 const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
72
73 const char* typeName = nullptr;
74
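A worked instance of the constant arithmetic above may help (an editorial sketch only; the concrete shift and cookie values are assumptions for illustration, and the real definitions live in PartitionAlloc.h):

// Not part of the file under review. In a release build kExtraAllocSize is 0,
// so with a hypothetical kBucketShift of 4 (16-byte bucket granularity):
//   kRealAllocSize   = kTestAllocSize + kExtraAllocSize = 16 + 0 = 16
//   kTestBucketIndex = kRealAllocSize >> kBucketShift   = 16 >> 4 = 1
// In an ENABLE(ASSERT) build each slot is bracketed by cookies,
//   [cookie][user data][cookie]
// which is why kPointerOffset becomes kCookieSize and kExtraAllocSize
// becomes kCookieSize * 2.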
75 void TestSetup()
76 {
77 allocator.init();
78 genericAllocator.init();
79 }
80
81 void TestShutdown()
82 {
83 // We expect no leaks in the general case. We have a test for leak
84 // detection.
85 EXPECT_TRUE(allocator.shutdown());
86 EXPECT_TRUE(genericAllocator.shutdown());
87 }
88
89 #if !CPU(64BIT) || OS(POSIX)
90 bool SetAddressSpaceLimit()
91 {
92 #if !CPU(64BIT)
93 // 32 bits => address space is limited already.
94 return true;
95 #elif OS(POSIX) && !OS(MACOSX)
96 // Mac will accept RLIMIT_AS changes but it is not enforced.
97 // See https://crbug.com/435269 and rdar://17576114.
98 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024;
99 struct rlimit limit;
100 if (getrlimit(RLIMIT_AS, &limit) != 0)
101 return false;
102 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
103 limit.rlim_cur = kAddressSpaceLimit;
104 if (setrlimit(RLIMIT_AS, &limit) != 0)
105 return false;
106 }
107 return true;
108 #else
109 return false;
110 #endif
111 }
112
113 bool ClearAddressSpaceLimit()
114 {
115 #if !CPU(64BIT)
116 return true;
117 #elif OS(POSIX)
118 struct rlimit limit;
119 if (getrlimit(RLIMIT_AS, &limit) != 0)
120 return false;
121 limit.rlim_cur = limit.rlim_max;
122 if (setrlimit(RLIMIT_AS, &limit) != 0)
123 return false;
124 return true;
125 #else
126 return false;
127 #endif
128 }
129 #endif
130
131 PartitionPage* GetFullPage(size_t size)
132 {
133 size_t realSize = size + kExtraAllocSize;
134 size_t bucketIdx = realSize >> kBucketShift;
135 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
136 size_t numSlots = (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / realSize;
137 void* first = 0;
138 void* last = 0;
139 size_t i;
140 for (i = 0; i < numSlots; ++i) {
141 void* ptr = partitionAlloc(allocator.root(), size, typeName);
142 EXPECT_TRUE(ptr);
143 if (!i)
144 first = partitionCookieFreePointerAdjust(ptr);
145 else if (i == numSlots - 1)
146 last = partitionCookieFreePointerAdjust(ptr);
147 }
148 EXPECT_EQ(partitionPointerToPage(first), partitionPointerToPage(last));
149 if (bucket->numSystemPagesPerSlotSpan == kNumSystemPagesPerPartitionPage)
150 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
151 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
152 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
153 EXPECT_TRUE(bucket->activePagesHead);
154 EXPECT_TRUE(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
155 return bucket->activePagesHead;
156 }
157
158 void FreeFullPage(PartitionPage* page)
159 {
160 size_t size = page->bucket->slotSize;
161 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / size;
162 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
163 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
164 size_t i;
165 for (i = 0; i < numSlots; ++i) {
166 partitionFree(ptr + kPointerOffset);
167 ptr += size;
168 }
169 }
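GetFullPage() and FreeFullPage() drive most of the tests below; a typical cycle, using names already defined in this file:

// Fill every slot in one page of the kTestAllocSize bucket, then release
// them all; the page ends up empty but still owned by its bucket.
PartitionPage* page = GetFullPage(kTestAllocSize);
EXPECT_EQ(0, page->freelistHead);      // full: no free slots remain
FreeFullPage(page);
EXPECT_EQ(0, page->numAllocatedSlots); // every slot returned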
170
171 void CycleFreeCache(size_t size)
172 {
173 size_t realSize = size + kExtraAllocSize;
174 size_t bucketIdx = realSize >> kBucketShift;
175 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
176 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
177
178 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
179 void* ptr = partitionAlloc(allocator.root(), size, typeName);
180 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
181 partitionFree(ptr);
182 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
183 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
184 }
185 }
186
187 void CycleGenericFreeCache(size_t size)
188 {
189 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
190 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
191 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
192 PartitionBucket* bucket = page->bucket;
193 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
194 partitionFreeGeneric(genericAllocator.root(), ptr);
195 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
196 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
197 }
198 }
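Why cycling exactly kMaxFreeableSpans times flushes the cache: a minimal model, assuming the allocator keeps recently emptied spans in a fixed-size ring and decommits whatever each new entry evicts (the real bookkeeping lives in PartitionAlloc.cpp):

// Hypothetical sketch of the empty-span ring these helpers exploit. Each
// newly emptied span overwrites the oldest slot; after kMaxFreeableSpans
// pushes, anything that was cached beforehand has been evicted.
template <size_t N>
struct EmptySpanRing {
    void* slots[N] = {};
    size_t next = 0;
    void* push(void* emptySpan) // returns the evicted, now-decommittable span
    {
        void* evicted = slots[next];
        slots[next] = emptySpan;
        next = (next + 1) % N;
        return evicted;
    }
};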
199
200 void CheckPageInCore(void* ptr, bool inCore)
201 {
202 #if OS(LINUX)
203 unsigned char ret;
204 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
205 EXPECT_EQ(inCore, ret);
206 #endif
207 }
208
209 class MockPartitionStatsDumper : public PartitionStatsDumper {
210 public:
211 MockPartitionStatsDumper()
212 : m_totalResidentBytes(0)
213 , m_totalActiveBytes(0)
214 , m_totalDecommittableBytes(0)
215 , m_totalDiscardableBytes(0) { }
216
217 void partitionDumpTotals(const char* partitionName, const PartitionMemoryStats* memoryStats) override
218 {
219 EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes);
220 EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes);
221 EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes);
222 EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes);
223 EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes);
224 }
225 
226 void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats* memoryStats) override
227 {
228 (void) partitionName;
229 EXPECT_TRUE(memoryStats->isValid);
230 EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask);
231 m_bucketStats.append(*memoryStats);
232 m_totalResidentBytes += memoryStats->residentBytes;
233 m_totalActiveBytes += memoryStats->activeBytes;
234 m_totalDecommittableBytes += memoryStats->decommittableBytes;
235 m_totalDiscardableBytes += memoryStats->discardableBytes;
236 }
237
238 bool IsMemoryAllocationRecorded()
239 {
240 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0;
241 }
242
243 const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize)
244 {
245 for (size_t i = 0; i < m_bucketStats.size(); ++i) {
246 if (m_bucketStats[i].bucketSlotSize == bucketSize)
247 return &m_bucketStats[i];
248 }
249 return 0;
250 }
251
252 private:
253 size_t m_totalResidentBytes;
254 size_t m_totalActiveBytes;
255 size_t m_totalDecommittableBytes;
256 size_t m_totalDiscardableBytes;
257
258 Vector<PartitionBucketMemoryStats> m_bucketStats;
259 };
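A minimal usage sketch of the mock dumper, mirroring how the DumpMemoryStats test at the end of this file drives it:

// Dump stats into the mock, then check that the allocation showed up.
MockPartitionStatsDumper dumper;
partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator",
    false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());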
260
261 } // anonymous namespace
262
263 // Check that the most basic of allocate / free pairs work.
264 TEST(PartitionAllocTest, Basic)
265 {
266 TestSetup();
267 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
268 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage;
269
270 EXPECT_FALSE(bucket->emptyPagesHead);
271 EXPECT_FALSE(bucket->decommittedPagesHead);
272 EXPECT_EQ(seedPage, bucket->activePagesHead);
273 EXPECT_EQ(0, bucket->activePagesHead->nextPage);
274
275 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName);
276 EXPECT_TRUE(ptr);
277 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
278 // Check that the offset appears to include a guard page.
279 EXPECT_EQ(kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);
280
281 partitionFree(ptr);
282 // Expect that the last active page gets noticed as empty but doesn't get
283 // decommitted.
284 EXPECT_TRUE(bucket->emptyPagesHead);
285 EXPECT_FALSE(bucket->decommittedPagesHead);
286
287 TestShutdown();
288 }
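The guard-page assertion above reduces to this address arithmetic (a sketch using mask constants this file already references; the layout reading, that partition page 0 of each super page is reserved for guard/metadata pages, is inferred from the assertion itself):

// Offset of an allocation within its enclosing super page. Basic expects
// the first allocation to land one partition page in, at
// kPartitionPageSize + kPointerOffset, rather than at offset 0.
uintptr_t offsetInSuperPage(void* ptr)
{
    return reinterpret_cast<uintptr_t>(ptr) & kSuperPageOffsetMask;
}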
289
290 // Check that we can detect a memory leak.
291 TEST(PartitionAllocTest, SimpleLeak)
292 {
293 TestSetup();
294 void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize, typeName);
295 (void)leakedPtr;
296 void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName);
297 (void)leakedPtr2;
298 EXPECT_FALSE(allocator.shutdown());
299 EXPECT_FALSE(genericAllocator.shutdown());
300 }
301
302 // Test multiple allocations, and freelist handling.
303 TEST(PartitionAllocTest, MultiAlloc)
304 {
305 TestSetup();
306
307 char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
308 char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
309 EXPECT_TRUE(ptr1);
310 EXPECT_TRUE(ptr2);
311 ptrdiff_t diff = ptr2 - ptr1;
312 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
313
314 // Check that we re-use the just-freed slot.
315 partitionFree(ptr2);
316 ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
317 EXPECT_TRUE(ptr2);
318 diff = ptr2 - ptr1;
319 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
320 partitionFree(ptr1);
321 ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
322 EXPECT_TRUE(ptr1);
323 diff = ptr2 - ptr1;
324 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
325
326 char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
327 EXPECT_TRUE(ptr3);
328 diff = ptr3 - ptr1;
329 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
330
331 partitionFree(ptr1);
332 partitionFree(ptr2);
333 partitionFree(ptr3);
334
335 TestShutdown();
336 }
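The slot-reuse expectations above follow from the per-page freelist being LIFO; a minimal sketch of that behavior (illustrative only, not the allocator's actual pointer encoding):

// free() pushes a slot, alloc() pops it, so the just-freed slot is handed
// straight back; that is what the EXPECT_EQ(kRealAllocSize, diff) checks
// above rely on.
struct FreeSlot { FreeSlot* next; };

void freelistPush(FreeSlot*& head, void* slot)
{
    FreeSlot* entry = static_cast<FreeSlot*>(slot);
    entry->next = head;
    head = entry;
}

void* freelistPop(FreeSlot*& head)
{
    FreeSlot* entry = head;
    if (entry)
        head = entry->next;
    return entry;
}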
337
338 // Test a bucket with multiple pages.
339 TEST(PartitionAllocTest, MultiPages)
340 {
341 TestSetup();
342 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
343
344 PartitionPage* page = GetFullPage(kTestAllocSize);
345 FreeFullPage(page);
346 EXPECT_TRUE(bucket->emptyPagesHead);
347 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
348 EXPECT_EQ(0, page->nextPage);
349 EXPECT_EQ(0, page->numAllocatedSlots);
350
351 page = GetFullPage(kTestAllocSize);
352 PartitionPage* page2 = GetFullPage(kTestAllocSize);
353
354 EXPECT_EQ(page2, bucket->activePagesHead);
355 EXPECT_EQ(0, page2->nextPage);
356 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & kSuperPageBaseMask);
357
358 // Fully free the non-current page. This will leave us with no current
359 // active page because one is empty and the other is full.
360 FreeFullPage(page);
361 EXPECT_EQ(0, page->numAllocatedSlots);
362 EXPECT_TRUE(bucket->emptyPagesHead);
363 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
364
365 // Allocate a new page, it should pull from the freelist.
366 page = GetFullPage(kTestAllocSize);
367 EXPECT_FALSE(bucket->emptyPagesHead);
368 EXPECT_EQ(page, bucket->activePagesHead);
369
370 FreeFullPage(page);
371 FreeFullPage(page2);
372 EXPECT_EQ(0, page->numAllocatedSlots);
373 EXPECT_EQ(0, page2->numAllocatedSlots);
374 EXPECT_EQ(0, page2->numUnprovisionedSlots);
375 EXPECT_NE(-1, page2->emptyCacheIndex);
376
377 TestShutdown();
378 }
379
380 // Test some finer aspects of internal page transitions.
381 TEST(PartitionAllocTest, PageTransitions)
382 {
383 TestSetup();
384 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
385
386 PartitionPage* page1 = GetFullPage(kTestAllocSize);
387 EXPECT_EQ(page1, bucket->activePagesHead);
388 EXPECT_EQ(0, page1->nextPage);
389 PartitionPage* page2 = GetFullPage(kTestAllocSize);
390 EXPECT_EQ(page2, bucket->activePagesHead);
391 EXPECT_EQ(0, page2->nextPage);
392
393 // Bounce page1 back into the non-full list then fill it up again.
394 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
395 partitionFree(ptr);
396 EXPECT_EQ(page1, bucket->activePagesHead);
397 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName);
398 EXPECT_EQ(page1, bucket->activePagesHead);
399 EXPECT_EQ(page2, bucket->activePagesHead->nextPage);
400
401 // Allocating another page at this point should cause us to scan over page1
402 // (which is both full and NOT our current page), and evict it from the
403 // freelist. Older code had an O(n^2) condition due to failure to do this.
404 PartitionPage* page3 = GetFullPage(kTestAllocSize);
405 EXPECT_EQ(page3, bucket->activePagesHead);
406 EXPECT_EQ(0, page3->nextPage);
407
408 // Work out a pointer into page2 and free it.
409 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
410 partitionFree(ptr);
411 // Trying to allocate at this time should cause us to cycle around to page2
412 // and find the recently freed slot.
413 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
414 EXPECT_EQ(ptr, newPtr);
415 EXPECT_EQ(page2, bucket->activePagesHead);
416 EXPECT_EQ(page3, page2->nextPage);
417
418 // Work out a pointer into page1 and free it. This should pull the page
419 // back into the list of available pages.
420 ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
421 partitionFree(ptr);
422 // This allocation should be satisfied by page1.
423 newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
424 EXPECT_EQ(ptr, newPtr);
425 EXPECT_EQ(page1, bucket->activePagesHead);
426 EXPECT_EQ(page2, page1->nextPage);
427
428 FreeFullPage(page3);
429 FreeFullPage(page2);
430 FreeFullPage(page1);
431
432 // Allocating whilst in this state exposed a bug, so keep the test.
433 ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize, typeName));
434 partitionFree(ptr);
435
436 TestShutdown();
437 }
438
439 // Test some corner cases relating to page transitions in the internal
440 // free page list metadata bucket.
441 TEST(PartitionAllocTest, FreePageListPageTransitions)
442 {
443 TestSetup();
444 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
445
446 size_t numToFillFreeListPage = kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
447 // The +1 is because we need to account for the fact that the current page
448 // never gets thrown on the freelist.
449 ++numToFillFreeListPage;
450 OwnPtr<PartitionPage*[]> pages = adoptArrayPtr(new PartitionPage*[numToFillFreeListPage]);
451
452 size_t i;
453 for (i = 0; i < numToFillFreeListPage; ++i) {
454 pages[i] = GetFullPage(kTestAllocSize);
455 }
456 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
457 for (i = 0; i < numToFillFreeListPage; ++i)
458 FreeFullPage(pages[i]);
459 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
460 EXPECT_TRUE(bucket->emptyPagesHead);
461
462 // Allocate / free in a different bucket size so we get control of a
463 // different free page list. We need two pages because one will be the last
464 // active page and not get freed.
465 PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
466 PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
467 FreeFullPage(page1);
468 FreeFullPage(page2);
469
470 for (i = 0; i < numToFillFreeListPage; ++i) {
471 pages[i] = GetFullPage(kTestAllocSize);
472 }
473 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
474
475 for (i = 0; i < numToFillFreeListPage; ++i)
476 FreeFullPage(pages[i]);
477 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
478 EXPECT_TRUE(bucket->emptyPagesHead);
479
480 TestShutdown();
481 }
482
483 // Test a large series of allocations that cross more than one underlying
484 // 2MB super page allocation.
485 TEST(PartitionAllocTest, MultiPageAllocs)
486 {
487 TestSetup();
488 // This is guaranteed to cross a super page boundary because the first
489 // partition page "slot" will be taken up by a guard page.
490 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
491 // The super page should begin and end in a guard so we need one less page in
492 // order to allocate a single page in the new super page.
493 --numPagesNeeded;
494
495 EXPECT_GT(numPagesNeeded, 1u);
496 OwnPtr<PartitionPage*[]> pages;
497 pages = adoptArrayPtr(new PartitionPage*[numPagesNeeded]);
498 uintptr_t firstSuperPageBase = 0;
499 size_t i;
500 for (i = 0; i < numPagesNeeded; ++i) {
501 pages[i] = GetFullPage(kTestAllocSize);
502 void* storagePtr = partitionPageToPointer(pages[i]);
503 if (!i)
504 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
505 if (i == numPagesNeeded - 1) {
506 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
507 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask;
508 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
509 // Check that we allocated a guard page for the second page.
510 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset);
511 }
512 }
513 for (i = 0; i < numPagesNeeded; ++i)
514 FreeFullPage(pages[i]);
515
516 TestShutdown();
517 }
518
519 // Test the generic allocation functions that can handle arbitrary sizes and
520 // reallocing etc.
521 TEST(PartitionAllocTest, GenericAlloc)
522 {
523 TestSetup();
524
525 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName);
526 EXPECT_TRUE(ptr);
527 partitionFreeGeneric(genericAllocator.root(), ptr);
528 ptr = partitionAllocGeneric(genericAllocator.root(), kGenericMaxBucketed + 1, typeName);
529 EXPECT_TRUE(ptr);
530 partitionFreeGeneric(genericAllocator.root(), ptr);
531
532 ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName);
533 EXPECT_TRUE(ptr);
534 void* origPtr = ptr;
535 char* charPtr = static_cast<char*>(ptr);
536 *charPtr = 'A';
537
538 // Change the size of the realloc, remaining inside the same bucket.
539 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2, typeName);
540 EXPECT_EQ(ptr, newPtr);
541 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName);
542 EXPECT_EQ(ptr, newPtr);
543 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmallestBucket, typeName);
544 EXPECT_EQ(ptr, newPtr);
545
546 // Change the size of the realloc, switching buckets.
547 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmallestBucket + 1, typeName);
548 EXPECT_NE(newPtr, ptr);
549 // Check that the realloc copied correctly.
550 char* newCharPtr = static_cast<char*>(newPtr);
551 EXPECT_EQ(*newCharPtr, 'A');
552 #if ENABLE(ASSERT)
553 // Subtle: this checks for an old bug where we copied too much from the
554 // source of the realloc. The condition can be detected by a trashing of
555 // the uninitialized value in the space of the upsized allocation.
556 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket)));
557 #endif
558 *newCharPtr = 'B';
559 // The realloc moved. To check that the old allocation was freed, we can
560 // do an alloc of the old allocation size and check that the old allocation
561 // address is at the head of the freelist and reused.
562 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1, typeName);
563 EXPECT_EQ(reusedPtr, origPtr);
564 partitionFreeGeneric(genericAllocator.root(), reusedPtr);
565
566 // Downsize the realloc.
567 ptr = newPtr;
568 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName);
569 EXPECT_EQ(newPtr, origPtr);
570 newCharPtr = static_cast<char*>(newPtr);
571 EXPECT_EQ(*newCharPtr, 'B');
572 *newCharPtr = 'C';
573
574 // Upsize the realloc to outside the partition.
575 ptr = newPtr;
576 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed + 1, typeName);
577 EXPECT_NE(newPtr, ptr);
578 newCharPtr = static_cast<char*>(newPtr);
579 EXPECT_EQ(*newCharPtr, 'C');
580 *newCharPtr = 'D';
581
582 // Upsize and downsize the realloc, remaining outside the partition.
583 ptr = newPtr;
584 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed * 10, typeName);
585 newCharPtr = static_cast<char*>(newPtr);
586 EXPECT_EQ(*newCharPtr, 'D');
587 *newCharPtr = 'E';
588 ptr = newPtr;
589 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed * 2, typeName);
590 newCharPtr = static_cast<char*>(newPtr);
591 EXPECT_EQ(*newCharPtr, 'E');
592 *newCharPtr = 'F';
593
594 // Downsize the realloc to inside the partition.
595 ptr = newPtr;
596 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName);
597 EXPECT_NE(newPtr, ptr);
598 EXPECT_EQ(newPtr, origPtr);
599 newCharPtr = static_cast<char*>(newPtr);
600 EXPECT_EQ(*newCharPtr, 'F');
601
602 partitionFreeGeneric(genericAllocator.root(), newPtr);
603 TestShutdown();
604 }
605
606 // Test the generic allocation functions can handle some specific sizes of
607 // interest.
608 TEST(PartitionAllocTest, GenericAllocSizes)
609 {
610 TestSetup();
611
612 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0, typeName);
613 EXPECT_TRUE(ptr);
614 partitionFreeGeneric(genericAllocator.root(), ptr);
615
616 // kPartitionPageSize is interesting because it results in just one
617 // allocation per page, which tripped up some corner cases.
618 size_t size = kPartitionPageSize - kExtraAllocSize;
619 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
620 EXPECT_TRUE(ptr);
621 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
622 EXPECT_TRUE(ptr2);
623 partitionFreeGeneric(genericAllocator.root(), ptr);
624 // Should be freeable at this point.
625 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
626 EXPECT_NE(-1, page->emptyCacheIndex);
627 partitionFreeGeneric(genericAllocator.root(), ptr2);
628
629 size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - kSystemPageSize) / 2) - kExtraAllocSize;
630 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
631 EXPECT_TRUE(ptr);
632 memset(ptr, 'A', size);
633 ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
634 EXPECT_TRUE(ptr2);
635 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
636 EXPECT_TRUE(ptr3);
637 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
638 EXPECT_TRUE(ptr4);
639
640 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
641 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3));
642 EXPECT_NE(page, page2);
643
644 partitionFreeGeneric(genericAllocator.root(), ptr);
645 partitionFreeGeneric(genericAllocator.root(), ptr3);
646 partitionFreeGeneric(genericAllocator.root(), ptr2);
647 // Should be freeable at this point.
648 EXPECT_NE(-1, page->emptyCacheIndex);
649 EXPECT_EQ(0, page->numAllocatedSlots);
650 EXPECT_EQ(0, page->numUnprovisionedSlots);
651 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
652 EXPECT_EQ(ptr3, newPtr);
653 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
654 EXPECT_EQ(ptr2, newPtr);
655 #if OS(LINUX) && !ENABLE(ASSERT)
656 // On Linux, we have a guarantee that freelisting a page should cause its
657 // contents to be nulled out. We check for null here to detect a bug we
658 // had where a large slot size was causing us to not properly free all
659 // resources back to the system.
660 // We only run the check when asserts are disabled because when they are
661 // enabled, the allocated area is overwritten with an "uninitialized"
662 // byte pattern.
663 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
664 #endif
665 partitionFreeGeneric(genericAllocator.root(), newPtr);
666 partitionFreeGeneric(genericAllocator.root(), ptr3);
667 partitionFreeGeneric(genericAllocator.root(), ptr4);
668
669 // Can we allocate a massive (512MB) size?
670 // Allocate 512MB, but +1, to test for cookie writing alignment issues.
671 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024 + 1, typeName);
672 partitionFreeGeneric(genericAllocator.root(), ptr);
673
674 // Check a more reasonable, but still direct mapped, size.
675 // Chop a system page and a byte off to test for rounding errors.
676 size = 20 * 1024 * 1024;
677 size -= kSystemPageSize;
678 size -= 1;
679 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
680 char* charPtr = reinterpret_cast<char*>(ptr);
681 *(charPtr + (size - 1)) = 'A';
682 partitionFreeGeneric(genericAllocator.root(), ptr);
683
684 // Can we free null?
685 partitionFreeGeneric(genericAllocator.root(), 0);
686
687 // Do we correctly get a null for a failed allocation?
688 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024, typeName));
689
690 TestShutdown();
691 }
692
693 // Test that we can fetch the real allocated size after an allocation.
694 TEST(PartitionAllocTest, GenericAllocGetSize)
695 {
696 TestSetup();
697
698 void* ptr;
699 size_t requestedSize, actualSize, predictedSize;
700
701 EXPECT_TRUE(partitionAllocSupportsGetSize());
702
703 // Allocate something small.
704 requestedSize = 511 - kExtraAllocSize;
705 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
706 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName);
707 EXPECT_TRUE(ptr);
708 actualSize = partitionAllocGetSize(ptr);
709 EXPECT_EQ(predictedSize, actualSize);
710 EXPECT_LT(requestedSize, actualSize);
711 partitionFreeGeneric(genericAllocator.root(), ptr);
712
713 // Allocate a size that should be a perfect match for a bucket, because it
714 // is an exact power of 2.
715 requestedSize = (256 * 1024) - kExtraAllocSize;
716 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
717 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName);
718 EXPECT_TRUE(ptr);
719 actualSize = partitionAllocGetSize(ptr);
720 EXPECT_EQ(predictedSize, actualSize);
721 EXPECT_EQ(requestedSize, actualSize);
722 partitionFreeGeneric(genericAllocator.root(), ptr);
723
724 // Allocate a size that is a system page smaller than a bucket. GetSize()
725 // should return a larger size than we asked for now.
726 requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
727 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
728 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName);
729 EXPECT_TRUE(ptr);
730 actualSize = partitionAllocGetSize(ptr);
731 EXPECT_EQ(predictedSize, actualSize);
732 EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
733 // Check that we can write at the end of the reported size too.
734 char* charPtr = reinterpret_cast<char*>(ptr);
735 *(charPtr + (actualSize - 1)) = 'A';
736 partitionFreeGeneric(genericAllocator.root(), ptr);
737
738 // Allocate something very large, and uneven.
739 requestedSize = 512 * 1024 * 1024 - 1;
740 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
741 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName);
742 EXPECT_TRUE(ptr);
743 actualSize = partitionAllocGetSize(ptr);
744 EXPECT_EQ(predictedSize, actualSize);
745 EXPECT_LT(requestedSize, actualSize);
746 partitionFreeGeneric(genericAllocator.root(), ptr);
747
748 // Too large allocation.
749 requestedSize = INT_MAX;
750 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
751 EXPECT_EQ(requestedSize, predictedSize);
752
753 TestShutdown();
754 }
755
756 // Test the realloc() contract.
757 TEST(PartitionAllocTest, Realloc)
758 {
759 TestSetup();
760
761 // realloc(0, size) should be equivalent to malloc().
762 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize, typeName);
763 memset(ptr, 'A', kTestAllocSize);
764 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
765 // realloc(ptr, 0) should be equivalent to free().
766 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0, typeName);
767 EXPECT_EQ(0, ptr2);
768 EXPECT_EQ(partitionCookieFreePointerAdjust(ptr), page->freelistHead);
769
770 // Test that growing an allocation with realloc() copies everything from the
771 // old allocation.
772 size_t size = kSystemPageSize - kExtraAllocSize;
773 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
774 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
775 memset(ptr, 'A', size);
776 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName);
777 EXPECT_NE(ptr, ptr2);
778 char* charPtr2 = static_cast<char*>(ptr2);
779 EXPECT_EQ('A', charPtr2[0]);
780 EXPECT_EQ('A', charPtr2[size - 1]);
781 #if ENABLE(ASSERT)
782 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
783 #endif
784
785 // Test that shrinking an allocation with realloc() also copies everything
786 // from the old allocation.
787 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, typeName);
788 EXPECT_NE(ptr2, ptr);
789 char* charPtr = static_cast<char*>(ptr);
790 EXPECT_EQ('A', charPtr[0]);
791 EXPECT_EQ('A', charPtr[size - 2]);
792 #if ENABLE(ASSERT)
793 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
794 #endif
795
796 partitionFreeGeneric(genericAllocator.root(), ptr);
797
798 // Test that shrinking a direct mapped allocation happens in-place.
799 size = kGenericMaxBucketed + 16 * kSystemPageSize;
800 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
801 size_t actualSize = partitionAllocGetSize(ptr);
802 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed + 8 * kSystemPageSize, typeName);
803 EXPECT_EQ(ptr, ptr2);
804 EXPECT_EQ(actualSize - 8 * kSystemPageSize, partitionAllocGetSize(ptr2));
805
806 // Test that a previously in-place shrunk direct mapped allocation can be
807 // expanded up again within its original size.
808 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - kSystemPageSize, typeName);
809 EXPECT_EQ(ptr2, ptr);
810 EXPECT_EQ(actualSize - kSystemPageSize, partitionAllocGetSize(ptr));
811
812 // Test that a direct mapped allocation is not performed in-place when the
813 // new size is small enough.
814 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kSystemPageSize, typeName);
815 EXPECT_NE(ptr, ptr2);
816
817 partitionFreeGeneric(genericAllocator.root(), ptr2);
818
819 TestShutdown();
820 }
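A condensed reading of the realloc() policy this test observes (inferred from the assertions above, not lifted from partitionReallocGeneric() itself):

// Hypothetical decision ladder matching the test's expectations.
enum class ReallocPath { InPlace, MoveAndCopy };

ReallocPath expectedPath(size_t oldBucketSize, size_t newBucketSize,
    bool directMapped, bool shrinkWithinReservation)
{
    if (directMapped && shrinkWithinReservation)
        return ReallocPath::InPlace;    // trims committed pages, same address
    if (!directMapped && oldBucketSize == newBucketSize)
        return ReallocPath::InPlace;    // same bucket, same slot
    return ReallocPath::MoveAndCopy;    // new slot; copies min(old, new) bytes
}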
821
822 // Tests the handing out of freelists for partial pages.
823 TEST(PartitionAllocTest, PartialPageFreelists)
824 {
825 TestSetup();
826
827 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
828 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, bigSize + kExtraAllocSize);
829 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
830 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
831 EXPECT_EQ(0, bucket->emptyPagesHead);
832
833 void* ptr = partitionAlloc(allocator.root(), bigSize, typeName);
834 EXPECT_TRUE(ptr);
835
836 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
837 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (bigSize + kExtraAllocSize);
838 EXPECT_EQ(4u, totalSlots);
839 // The freelist should have one entry, because we were able to exactly fit
840 // one object slot and one freelist pointer (the null that the head points
841 // to) into a system page.
842 EXPECT_TRUE(page->freelistHead);
843 EXPECT_EQ(1, page->numAllocatedSlots);
844 EXPECT_EQ(2, page->numUnprovisionedSlots);
845
846 void* ptr2 = partitionAlloc(allocator.root(), bigSize, typeName);
847 EXPECT_TRUE(ptr2);
848 EXPECT_FALSE(page->freelistHead);
849 EXPECT_EQ(2, page->numAllocatedSlots);
850 EXPECT_EQ(2, page->numUnprovisionedSlots);
851
852 void* ptr3 = partitionAlloc(allocator.root(), bigSize, typeName);
853 EXPECT_TRUE(ptr3);
854 EXPECT_TRUE(page->freelistHead);
855 EXPECT_EQ(3, page->numAllocatedSlots);
856 EXPECT_EQ(0, page->numUnprovisionedSlots);
857
858 void* ptr4 = partitionAlloc(allocator.root(), bigSize, typeName);
859 EXPECT_TRUE(ptr4);
860 EXPECT_FALSE(page->freelistHead);
861 EXPECT_EQ(4, page->numAllocatedSlots);
862 EXPECT_EQ(0, page->numUnprovisionedSlots);
863
864 void* ptr5 = partitionAlloc(allocator.root(), bigSize, typeName);
865 EXPECT_TRUE(ptr5);
866
867 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr5));
868 EXPECT_EQ(1, page2->numAllocatedSlots);
869
870 // Churn things a little whilst there's a partial page freelist.
871 partitionFree(ptr);
872 ptr = partitionAlloc(allocator.root(), bigSize, typeName);
873 void* ptr6 = partitionAlloc(allocator.root(), bigSize, typeName);
874
875 partitionFree(ptr);
876 partitionFree(ptr2);
877 partitionFree(ptr3);
878 partitionFree(ptr4);
879 partitionFree(ptr5);
880 partitionFree(ptr6);
881 EXPECT_NE(-1, page->emptyCacheIndex);
882 EXPECT_NE(-1, page2->emptyCacheIndex);
883 EXPECT_TRUE(page2->freelistHead);
884 EXPECT_EQ(0, page2->numAllocatedSlots);
885
886 // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
887 size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
888 bucketIdx = (mediumSize + kExtraAllocSize) >> kBucketShift;
889 bucket = &allocator.root()->buckets()[bucketIdx];
890 EXPECT_EQ(0, bucket->emptyPagesHead);
891
892 ptr = partitionAlloc(allocator.root(), mediumSize, typeName);
893 EXPECT_TRUE(ptr);
894 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
895 EXPECT_EQ(1, page->numAllocatedSlots);
896 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (mediumSize + kExtraAllocSize);
897 size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize);
898 EXPECT_EQ(2u, firstPageSlots);
899 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
900
901 partitionFree(ptr);
902
903 size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
904 bucketIdx = (smallSize + kExtraAllocSize) >> kBucketShift;
905 bucket = &allocator.root()->buckets()[bucketIdx];
906 EXPECT_EQ(0, bucket->emptyPagesHead);
907
908 ptr = partitionAlloc(allocator.root(), smallSize, typeName);
909 EXPECT_TRUE(ptr);
910 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
911 EXPECT_EQ(1, page->numAllocatedSlots);
912 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (smallSize + kExtraAllocSize);
913 firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize);
914 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
915
916 partitionFree(ptr);
917 EXPECT_TRUE(page->freelistHead);
918 EXPECT_EQ(0, page->numAllocatedSlots);
919
920 size_t verySmallSize = 32 - kExtraAllocSize;
921 bucketIdx = (verySmallSize + kExtraAllocSize) >> kBucketShift;
922 bucket = &allocator.root()->buckets()[bucketIdx];
923 EXPECT_EQ(0, bucket->emptyPagesHead);
924
925 ptr = partitionAlloc(allocator.root(), verySmallSize, typeName);
926 EXPECT_TRUE(ptr);
927 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
928 EXPECT_EQ(1, page->numAllocatedSlots);
929 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (verySmallSize + kExtraAllocSize);
930 firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
931 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
932
933 partitionFree(ptr);
934 EXPECT_TRUE(page->freelistHead);
935 EXPECT_EQ(0, page->numAllocatedSlots);
936
937 // And try an allocation size (against the generic allocator) that is
938 // larger than a system page.
939 size_t pageAndAHalfSize = (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
940 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize, typeName);
941 EXPECT_TRUE(ptr);
942 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
943 EXPECT_EQ(1, page->numAllocatedSlots);
944 EXPECT_TRUE(page->freelistHead);
945 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
946 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
947 partitionFreeGeneric(genericAllocator.root(), ptr);
948
949 // And then make sure that exactly the page size only faults one page.
950 size_t pageSize = kSystemPageSize - kExtraAllocSize;
951 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize, typeName);
952 EXPECT_TRUE(ptr);
953 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
954 EXPECT_EQ(1, page->numAllocatedSlots);
955 EXPECT_FALSE(page->freelistHead);
956 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (pageSize + kExtraAllocSize);
957 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
958 partitionFreeGeneric(genericAllocator.root(), ptr);
959
960 TestShutdown();
961 }
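The provisioning arithmetic repeated throughout this test, factored out for reference (these mirror the expressions used above; only the first system page of a fresh slot span is faulted in eagerly):

size_t totalSlotsInSpan(size_t numSystemPagesPerSlotSpan, size_t slotSize)
{
    return (numSystemPagesPerSlotSpan * kSystemPageSize) / slotSize;
}

size_t slotsProvisionedUpFront(size_t slotSize)
{
    // The remainder of the span starts out as numUnprovisionedSlots.
    return kSystemPageSize / slotSize;
}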
962
963 // Test some of the fragmentation-resistant properties of the allocator.
964 TEST(PartitionAllocTest, PageRefilling)
965 {
966 TestSetup();
967 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
968
969 // Grab two full pages and a non-full page.
970 PartitionPage* page1 = GetFullPage(kTestAllocSize);
971 PartitionPage* page2 = GetFullPage(kTestAllocSize);
972 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName);
973 EXPECT_TRUE(ptr);
974 EXPECT_NE(page1, bucket->activePagesHead);
975 EXPECT_NE(page2, bucket->activePagesHead);
976 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
977 EXPECT_EQ(1, page->numAllocatedSlots);
978
979 // Work out a pointer into page1 and free it; and then page2 and free it.
980 char* ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
981 partitionFree(ptr2);
982 ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
983 partitionFree(ptr2);
984
985 // If we perform two allocations from the same bucket now, we expect to
986 // refill both the nearly full pages.
987 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName);
988 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName);
989 EXPECT_EQ(1, page->numAllocatedSlots);
990
991 FreeFullPage(page2);
992 FreeFullPage(page1);
993 partitionFree(ptr);
994
995 TestShutdown();
996 }
997
998 // Basic tests to ensure that allocations work for partial page buckets.
999 TEST(PartitionAllocTest, PartialPages)
1000 {
1001 TestSetup();
1002
1003 // Find a size that is backed by a partial partition page.
1004 size_t size = sizeof(void*);
1005 PartitionBucket* bucket = 0;
1006 while (size < kTestMaxAllocation) {
1007 bucket = &allocator.root()->buckets()[size >> kBucketShift];
1008 if (bucket->numSystemPagesPerSlotSpan % kNumSystemPagesPerPartitionPage)
1009 break;
1010 size += sizeof(void*);
1011 }
1012 EXPECT_LT(size, kTestMaxAllocation);
1013
1014 PartitionPage* page1 = GetFullPage(size);
1015 PartitionPage* page2 = GetFullPage(size);
1016 FreeFullPage(page2);
1017 FreeFullPage(page1);
1018
1019 TestShutdown();
1020 }
1021
1022 // Test correct handling if our mapping collides with another.
1023 TEST(PartitionAllocTest, MappingCollision)
1024 {
1025 TestSetup();
1026 // The -2 is because the first and last partition pages in a super page are
1027 // guard pages.
1028 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
1029 OwnPtr<PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new PartitionPage*[numPartitionPagesNeeded]);
1030 OwnPtr<PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new PartitionPage*[numPartitionPagesNeeded]);
1031
1032 size_t i;
1033 for (i = 0; i < numPartitionPagesNeeded; ++i)
1034 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
1035
1036 char* pageBase = reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0]));
1037 EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
1038 pageBase -= kPartitionPageSize;
1039 // Map a single system page either side of the mapping for our allocations,
1040 // with the goal of tripping up alignment of the next mapping.
1041 void* map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible);
1042 EXPECT_TRUE(map1);
1043 void* map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible);
1044 EXPECT_TRUE(map2);
1045
1046 for (i = 0; i < numPartitionPagesNeeded; ++i)
1047 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
1048
1049 freePages(map1, kPageAllocationGranularity);
1050 freePages(map2, kPageAllocationGranularity);
1051
1052 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
1053 EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
1054 pageBase -= kPartitionPageSize;
1055 // Map a single system page either side of the mapping for our allocations,
1056 // with the goal of tripping up alignment of the next mapping.
1057 map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible);
1058 EXPECT_TRUE(map1);
1059 map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible);
1060 EXPECT_TRUE(map2);
1061 setSystemPagesInaccessible(map1, kPageAllocationGranularity);
1062 setSystemPagesInaccessible(map2, kPageAllocationGranularity);
1063
1064 PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
1065 freePages(map1, kPageAllocationGranularity);
1066 freePages(map2, kPageAllocationGranularity);
1067
1068 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kPartitionPageOffsetMask);
1069
1070 // And make sure we really did get a page in a new superpage.
1071 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask);
1072 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask);
1073
1074 FreeFullPage(pageInThirdSuperPage);
1075 for (i = 0; i < numPartitionPagesNeeded; ++i) {
1076 FreeFullPage(firstSuperPagePages[i]);
1077 FreeFullPage(secondSuperPagePages[i]);
1078 }
1079
1080 TestShutdown();
1081 }
1082
1083 // Tests that pages in the free page cache do get freed as appropriate.
1084 TEST(PartitionAllocTest, FreeCache)
1085 {
1086 TestSetup();
1087
1088 EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);
1089
1090 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
1091 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
1092 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
1093
1094 void* ptr = partitionAlloc(allocator.root(), bigSize, typeName);
1095 EXPECT_TRUE(ptr);
1096 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
1097 EXPECT_EQ(0, bucket->emptyPagesHead);
1098 EXPECT_EQ(1, page->numAllocatedSlots);
1099 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
1100 partitionFree(ptr);
1101 EXPECT_EQ(0, page->numAllocatedSlots);
1102 EXPECT_NE(-1, page->emptyCacheIndex);
1103 EXPECT_TRUE(page->freelistHead);
1104
1105 CycleFreeCache(kTestAllocSize);
1106
1107 // Flushing the cache should have really freed the unused page.
1108 EXPECT_FALSE(page->freelistHead);
1109 EXPECT_EQ(-1, page->emptyCacheIndex);
1110 EXPECT_EQ(0, page->numAllocatedSlots);
1111 PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
1112 EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);
1113
1114 // Check that an allocation works ok whilst in this state (a free'd page
1115 // as the active pages head).
1116 ptr = partitionAlloc(allocator.root(), bigSize, typeName);
1117 EXPECT_FALSE(bucket->emptyPagesHead);
1118 partitionFree(ptr);
1119
1120 // Also check that a page that is bouncing immediately between empty and
1121 // used does not get freed.
1122 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
1123 ptr = partitionAlloc(allocator.root(), bigSize, typeName);
1124 EXPECT_TRUE(page->freelistHead);
1125 partitionFree(ptr);
1126 EXPECT_TRUE(page->freelistHead);
1127 }
1128 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
1129 TestShutdown();
1130 }
1131
1132 // Tests for a bug we had with losing references to free pages.
1133 TEST(PartitionAllocTest, LostFreePagesBug)
1134 {
1135 TestSetup();
1136
1137 size_t size = kPartitionPageSize - kExtraAllocSize;
1138
1139 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1140 EXPECT_TRUE(ptr);
1141 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1142 EXPECT_TRUE(ptr2);
1143
1144 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
1145 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr2));
1146 PartitionBucket* bucket = page->bucket;
1147
1148 EXPECT_EQ(0, bucket->emptyPagesHead);
1149 EXPECT_EQ(-1, page->numAllocatedSlots);
1150 EXPECT_EQ(1, page2->numAllocatedSlots);
1151
1152 partitionFreeGeneric(genericAllocator.root(), ptr);
1153 partitionFreeGeneric(genericAllocator.root(), ptr2);
1154
1155 EXPECT_TRUE(bucket->emptyPagesHead);
1156 EXPECT_TRUE(bucket->emptyPagesHead->nextPage);
1157 EXPECT_EQ(0, page->numAllocatedSlots);
1158 EXPECT_EQ(0, page2->numAllocatedSlots);
1159 EXPECT_TRUE(page->freelistHead);
1160 EXPECT_TRUE(page2->freelistHead);
1161
1162 CycleGenericFreeCache(kTestAllocSize);
1163
1164 EXPECT_FALSE(page->freelistHead);
1165 EXPECT_FALSE(page2->freelistHead);
1166
1167 EXPECT_TRUE(bucket->emptyPagesHead);
1168 EXPECT_TRUE(bucket->emptyPagesHead->nextPage);
1169 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
1170
1171 // At this moment, we have two decommitted pages, on the empty list.
1172 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1173 EXPECT_TRUE(ptr);
1174 partitionFreeGeneric(genericAllocator.root(), ptr);
1175
1176 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
1177 EXPECT_TRUE(bucket->emptyPagesHead);
1178 EXPECT_TRUE(bucket->decommittedPagesHead);
1179
1180 CycleGenericFreeCache(kTestAllocSize);
1181
1182 // We're now set up to trigger a historical bug by scanning over the active
1183 // pages list. The current code gets into a different state, but we'll keep
1184 // the test as being an interesting corner case.
1185 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1186 EXPECT_TRUE(ptr);
1187 partitionFreeGeneric(genericAllocator.root(), ptr);
1188
1189 EXPECT_TRUE(bucket->activePagesHead);
1190 EXPECT_TRUE(bucket->emptyPagesHead);
1191 EXPECT_TRUE(bucket->decommittedPagesHead);
1192
1193 TestShutdown();
1194 }
1195
1196 #if !CPU(64BIT) || OS(POSIX)
1197
1198 static void DoReturnNullTest(size_t allocSize)
1199 {
1200 TestSetup();
1201
1202 EXPECT_TRUE(SetAddressSpaceLimit());
1203
1204 // Work out the number of allocations for 6 GB of memory.
1205 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
1206
1207 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric(genericAllocator.root(), numAllocations * sizeof(void*), typeName));
1208 int i;
1209
1210 for (i = 0; i < numAllocations; ++i) {
1211 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName);
1212 if (!i)
1213 EXPECT_TRUE(ptrs[0]);
1214 if (!ptrs[i]) {
1215 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName);
1216 EXPECT_FALSE(ptrs[i]);
1217 break;
1218 }
1219 }
1220
1221 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
1222 // we're not actually testing anything here.
1223 EXPECT_LT(i, numAllocations);
1224
1225 // Free, reallocate and free again each block we allocated. We do this to
1226 // check that freeing memory also works correctly after a failed allocation.
1227 for (--i; i >= 0; --i) {
1228 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
1229 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName);
1230 EXPECT_TRUE(ptrs[i]);
1231 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
1232 }
1233
1234 partitionFreeGeneric(genericAllocator.root(), ptrs);
1235
1236 EXPECT_TRUE(ClearAddressSpaceLimit());
1237
1238 TestShutdown();
1239 }
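A note on the numAllocations expression above: it computes 6 GB / allocSize while keeping every intermediate value within int range, by cancelling a factor of 1024 from numerator and denominator. For the 512 KB case used below:

// (6 * 2^30) / (512 * 2^10) = (6 * 2^20) / 512 = 12288 allocations.
constexpr int kAllocsFor512K = (6 * 1024 * 1024) / ((512 * 1024) / 1024);
static_assert(kAllocsFor512K == 12288, "6 GB in 512 KB blocks");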
1240
1241 // Tests that if an allocation fails in "return null" mode, repeating it doesn't
1242 // crash, and still returns null. The test tries to allocate 6 GB of memory in
1243 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB
1244 // using setrlimit() first.
1245 #if OS(MACOSX)
1246 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull
1247 #else
1248 #define MAYBE_RepeatedReturnNull RepeatedReturnNull
1249 #endif
1250 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull)
1251 {
1252 // A single-slot but non-direct-mapped allocation size.
1253 DoReturnNullTest(512 * 1024);
1254 }
1255
1256 // Another "return null" test but for larger, direct-mapped allocations.
1257 #if OS(MACOSX)
1258 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
1259 #else
1260 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
1261 #endif
1262 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect)
1263 {
1264 // A direct-mapped allocation size.
1265 DoReturnNullTest(256 * 1024 * 1024);
1266 }
1267
1268 #endif // !CPU(64BIT) || OS(POSIX)
1269
1270 #if !OS(ANDROID)
1271
1272 // Make sure that malloc(-1) dies.
1273 // In the past, we had an integer overflow that would alias malloc(-1) to
1274 // malloc(0), which is not good.
1275 TEST(PartitionAllocDeathTest, LargeAllocs)
1276 {
1277 TestSetup();
1278 // Largest alloc.
1279 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1), typeName), "");
1280 // And the smallest allocation we expect to die.
1281 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1, typeName), "");
1282
1283 TestShutdown();
1284 }
1285
1286 // Check that our immediate double-free detection works.
1287 TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
1288 {
1289 TestSetup();
1290
1291 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName);
1292 EXPECT_TRUE(ptr);
1293 partitionFreeGeneric(genericAllocator.root(), ptr);
1294
1295 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1296
1297 TestShutdown();
1298 }
1299
1300 // Check that our refcount-based double-free detection works.
1301 TEST(PartitionAllocDeathTest, RefcountDoubleFree)
1302 {
1303 TestSetup();
1304
1305 void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName);
1306 EXPECT_TRUE(ptr);
1307 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName);
1308 EXPECT_TRUE(ptr2);
1309 partitionFreeGeneric(genericAllocator.root(), ptr);
1310 partitionFreeGeneric(genericAllocator.root(), ptr2);
1311 // This is not an immediate double-free so our immediate detection won't
1312 // fire. However, it does take the "refcount" of the partition page to -1,
1313 // which is illegal and should be trapped.
1314 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");
1315
1316 TestShutdown();
1317 }
1318
1319 // Check that guard pages are present where expected.
1320 TEST(PartitionAllocDeathTest, GuardPages)
1321 {
1322 TestSetup();
1323
1324 // partitionAlloc adds kPartitionPageSize to the requested size
1325 // (for metadata), and then rounds that size to kPageAllocationGranularity.
1326 // To be able to reliably write one past a direct allocation, choose a size
1327 // that's
1328 // a) larger than kGenericMaxBucketed (to make the allocation direct)
1329 // b) aligned at kPageAllocationGranularity boundaries after
1330 // kPartitionPageSize has been added to it.
1331 // (On 32-bit, partitionAlloc adds another kSystemPageSize to the
1332 // allocation size before rounding, but there it marks the memory right
1333 // after size as inaccessible, so it's fine to write 1 past the size we
1334 // hand to partitionAlloc and we don't need to worry about allocation
1335 // granularities.)
1336 #define ALIGN(N, A) (((N) + (A) - 1) / (A) * (A))
1337 const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize, kPageAllocationGranularity) - kPartitionPageSize;
1338 #undef ALIGN
1339 static_assert(kSize > kGenericMaxBucketed, "allocation not large enough for direct allocation");
1340 size_t size = kSize - kExtraAllocSize;
1341 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1342
1343 EXPECT_TRUE(ptr);
1344 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1345
1346 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1347 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1348
1349 partitionFreeGeneric(genericAllocator.root(), ptr);
1350
1351 TestShutdown();
1352 }
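A worked instance of the ALIGN arithmetic above, using illustrative constants (hypothetical values: kGenericMaxBucketed = 1 MB, kPartitionPageSize = 16 KB, kPageAllocationGranularity = 64 KB; the real values are in PartitionAlloc.h):

// N     = 1048576 + 1 + 16384             = 1064961
// kSize = alignUp(1064961, 65536) - 16384 = 1114112 - 16384 = 1097728
// kSize + kPartitionPageSize is then exactly granularity-aligned, which puts
// the trailing guard page immediately after the last byte the test writes.
constexpr size_t alignUp(size_t n, size_t a) { return (n + a - 1) / a * a; }
static_assert(alignUp(1064961, 65536) - 16384 == 1097728, "worked example");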
1353
1354 // Check that a bad free() is caught where the free() refers to an unused
1355 // partition page of a large allocation.
1356 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage)
1357 {
1358 TestSetup();
1359
1360 // This large size will result in a direct mapped allocation with guard
1361 // pages at either end.
1362 void* ptr = partitionAllocGeneric(genericAllocator.root(), kPartitionPageSize * 2, typeName);
1363 EXPECT_TRUE(ptr);
1364 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
1365
1366 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), "");
1367
1368 partitionFreeGeneric(genericAllocator.root(), ptr);
1369
1370 TestShutdown();
1371 }
1372
1373 #endif // !OS(ANDROID)
1374
1375 // Tests that partitionDumpStatsGeneric and partitionDumpStats run without
1376 // crashing and return non-zero values when memory is allocated.
1377 TEST(PartitionAllocTest, DumpMemoryStats)
1378 {
1379 TestSetup();
1380 {
1381 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName);
1382 MockPartitionStatsDumper mockStatsDumper;
1383 partitionDumpStats(allocator.root(), "mock_allocator", false /* detailed dump */, &mockStatsDumper);
1384 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded());
1385
1386 partitionFree(ptr);
1387 }
1388
1389 // This series of tests checks the active -> empty -> decommitted states.
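    // A page is "active" while it holds live slots, becomes "empty" once its
    // last slot is freed, and is "decommitted" after the empty-page cache
    // cycles it out and the memory is returned to the system. Each of the
    // three blocks below pins the page in one of those states and checks the
    // corresponding counters.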
1390 {
1391 void* genericPtr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize, typeName);
1392 {
1393 MockPartitionStatsDumper mockStatsDumperGeneric;
1394 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1395 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1396
1397 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1398 EXPECT_TRUE(stats);
1399 EXPECT_TRUE(stats->isValid);
1400 EXPECT_EQ(2048u, stats->bucketSlotSize);
1401 EXPECT_EQ(2048u, stats->activeBytes);
1402 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1403 EXPECT_EQ(0u, stats->decommittableBytes);
1404 EXPECT_EQ(0u, stats->discardableBytes);
1405 EXPECT_EQ(0u, stats->numFullPages);
1406 EXPECT_EQ(1u, stats->numActivePages);
1407 EXPECT_EQ(0u, stats->numEmptyPages);
1408 EXPECT_EQ(0u, stats->numDecommittedPages);
1409 }
1410
1411 partitionFreeGeneric(genericAllocator.root(), genericPtr);
1412
1413 {
1414 MockPartitionStatsDumper mockStatsDumperGeneric;
1415 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1416 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1417
1418 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1419 EXPECT_TRUE(stats);
1420 EXPECT_TRUE(stats->isValid);
1421 EXPECT_EQ(2048u, stats->bucketSlotSize);
1422 EXPECT_EQ(0u, stats->activeBytes);
1423 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1424 EXPECT_EQ(kSystemPageSize, stats->decommittableBytes);
1425 EXPECT_EQ(0u, stats->discardableBytes);
1426 EXPECT_EQ(0u, stats->numFullPages);
1427 EXPECT_EQ(0u, stats->numActivePages);
1428 EXPECT_EQ(1u, stats->numEmptyPages);
1429 EXPECT_EQ(0u, stats->numDecommittedPages);
1430 }
1431
1432 CycleGenericFreeCache(kTestAllocSize);
1433
1434 {
1435 MockPartitionStatsDumper mockStatsDumperGeneric;
1436 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1437 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1438
1439 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1440 EXPECT_TRUE(stats);
1441 EXPECT_TRUE(stats->isValid);
1442 EXPECT_EQ(2048u, stats->bucketSlotSize);
1443 EXPECT_EQ(0u, stats->activeBytes);
1444 EXPECT_EQ(0u, stats->residentBytes);
1445 EXPECT_EQ(0u, stats->decommittableBytes);
1446 EXPECT_EQ(0u, stats->discardableBytes);
1447 EXPECT_EQ(0u, stats->numFullPages);
1448 EXPECT_EQ(0u, stats->numActivePages);
1449 EXPECT_EQ(0u, stats->numEmptyPages);
1450 EXPECT_EQ(1u, stats->numDecommittedPages);
1451 }
1452 }
1453
1454 // This test checks for correct empty page list accounting.
1455 {
1456 size_t size = kPartitionPageSize - kExtraAllocSize;
1457 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1458 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1459 partitionFreeGeneric(genericAllocator.root(), ptr1);
1460 partitionFreeGeneric(genericAllocator.root(), ptr2);
1461
1462 CycleGenericFreeCache(kTestAllocSize);
1463
1464 ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1465
1466 {
1467 MockPartitionStatsDumper mockStatsDumperGeneric;
1468 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1469 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1470
1471 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kPartitionPageSize);
1472 EXPECT_TRUE(stats);
1473 EXPECT_TRUE(stats->isValid);
1474 EXPECT_EQ(kPartitionPageSize, stats->bucketSlotSize);
1475 EXPECT_EQ(kPartitionPageSize, stats->activeBytes);
1476 EXPECT_EQ(kPartitionPageSize, stats->residentBytes);
1477 EXPECT_EQ(0u, stats->decommittableBytes);
1478 EXPECT_EQ(0u, stats->discardableBytes);
1479 EXPECT_EQ(1u, stats->numFullPages);
1480 EXPECT_EQ(0u, stats->numActivePages);
1481 EXPECT_EQ(0u, stats->numEmptyPages);
1482 EXPECT_EQ(1u, stats->numDecommittedPages);
1483 }
1484 partitionFreeGeneric(genericAllocator.root(), ptr1);
1485 }
1486
1487 // This test checks for correct direct mapped accounting.
1488 {
1489 size_t sizeSmaller = kGenericMaxBucketed + 1;
1490 size_t sizeBigger = (kGenericMaxBucketed * 2) + 1;
1491 size_t realSizeSmaller = (sizeSmaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
1492 size_t realSizeBigger = (sizeBigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
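        // i.e. each direct-mapped request is rounded up to a whole number of
        // system pages; e.g. assuming 4 KiB pages, (size + 4095) & ~4095.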
1493 void* ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName);
1494 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName);
1495
1496 {
1497 MockPartitionStatsDumper mockStatsDumperGeneric;
1498 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1499 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1500
1501 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(realSizeSmaller);
1502 EXPECT_TRUE(stats);
1503 EXPECT_TRUE(stats->isValid);
1504 EXPECT_TRUE(stats->isDirectMap);
1505 EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize);
1506 EXPECT_EQ(realSizeSmaller, stats->activeBytes);
1507 EXPECT_EQ(realSizeSmaller, stats->residentBytes);
1508 EXPECT_EQ(0u, stats->decommittableBytes);
1509 EXPECT_EQ(0u, stats->discardableBytes);
1510 EXPECT_EQ(1u, stats->numFullPages);
1511 EXPECT_EQ(0u, stats->numActivePages);
1512 EXPECT_EQ(0u, stats->numEmptyPages);
1513 EXPECT_EQ(0u, stats->numDecommittedPages);
1514
1515 stats = mockStatsDumperGeneric.GetBucketStats(realSizeBigger);
1516 EXPECT_TRUE(stats);
1517 EXPECT_TRUE(stats->isValid);
1518 EXPECT_TRUE(stats->isDirectMap);
1519 EXPECT_EQ(realSizeBigger, stats->bucketSlotSize);
1520 EXPECT_EQ(realSizeBigger, stats->activeBytes);
1521 EXPECT_EQ(realSizeBigger, stats->residentBytes);
1522 EXPECT_EQ(0u, stats->decommittableBytes);
1523 EXPECT_EQ(0u, stats->discardableBytes);
1524 EXPECT_EQ(1u, stats->numFullPages);
1525 EXPECT_EQ(0u, stats->numActivePages);
1526 EXPECT_EQ(0u, stats->numEmptyPages);
1527 EXPECT_EQ(0u, stats->numDecommittedPages);
1528 }
1529
1530 partitionFreeGeneric(genericAllocator.root(), ptr2);
1531 partitionFreeGeneric(genericAllocator.root(), ptr);
1532
1533 // Whilst we're here, allocate again and free with different ordering
1534 // to give a workout to our linked list code.
1535 ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName);
1536 ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName);
1537 partitionFreeGeneric(genericAllocator.root(), ptr);
1538 partitionFreeGeneric(genericAllocator.root(), ptr2);
1539 }
1540
1541 // This test checks large-but-not-quite-direct allocations.
1542 {
1543 void* ptr = partitionAllocGeneric(genericAllocator.root(), 65536 + 1, typeName);
1544
1545 {
1546 MockPartitionStatsDumper mockStatsDumperGeneric;
1547 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1548 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1549
1550 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
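            // Bucket slot sizes are spaced kGenericNumBucketsPerOrder to a
            // power-of-two order, so the first bucket above 65536 is 65536
            // plus 1/kGenericNumBucketsPerOrder of it (e.g. 73728 with 8
            // buckets per order).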
1551 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
1552 EXPECT_TRUE(stats);
1553 EXPECT_TRUE(stats->isValid);
1554 EXPECT_FALSE(stats->isDirectMap);
1555 EXPECT_EQ(slotSize, stats->bucketSlotSize);
1556 EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->activeBytes);
1557 EXPECT_EQ(slotSize, stats->residentBytes);
1558 EXPECT_EQ(0u, stats->decommittableBytes);
1559 EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
1560 EXPECT_EQ(1u, stats->numFullPages);
1561 EXPECT_EQ(0u, stats->numActivePages);
1562 EXPECT_EQ(0u, stats->numEmptyPages);
1563 EXPECT_EQ(0u, stats->numDecommittedPages);
1564 }
1565
1566 partitionFreeGeneric(genericAllocator.root(), ptr);
1567
1568 {
1569 MockPartitionStatsDumper mockStatsDumperGeneric;
1570 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1571 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1572
1573 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
1574 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
1575 EXPECT_TRUE(stats);
1576 EXPECT_TRUE(stats->isValid);
1577 EXPECT_FALSE(stats->isDirectMap);
1578 EXPECT_EQ(slotSize, stats->bucketSlotSize);
1579 EXPECT_EQ(0u, stats->activeBytes);
1580 EXPECT_EQ(slotSize, stats->residentBytes);
1581 EXPECT_EQ(slotSize, stats->decommittableBytes);
1582 EXPECT_EQ(0u, stats->numFullPages);
1583 EXPECT_EQ(0u, stats->numActivePages);
1584 EXPECT_EQ(1u, stats->numEmptyPages);
1585 EXPECT_EQ(0u, stats->numDecommittedPages);
1586 }
1587
1588 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), 65536 + kSystemPageSize + 1, typeName);
1589 EXPECT_EQ(ptr, ptr2);
1590
1591 {
1592 MockPartitionStatsDumper mockStatsDumperGeneric;
1593 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1594 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1595
1596 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
1597 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
1598 EXPECT_TRUE(stats);
1599 EXPECT_TRUE(stats->isValid);
1600 EXPECT_FALSE(stats->isDirectMap);
1601 EXPECT_EQ(slotSize, stats->bucketSlotSize);
1602 EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize, stats->activeBytes);
1603 EXPECT_EQ(slotSize, stats->residentBytes);
1604 EXPECT_EQ(0u, stats->decommittableBytes);
1605 EXPECT_EQ(0u, stats->discardableBytes);
1606 EXPECT_EQ(1u, stats->numFullPages);
1607 EXPECT_EQ(0u, stats->numActivePages);
1608 EXPECT_EQ(0u, stats->numEmptyPages);
1609 EXPECT_EQ(0u, stats->numDecommittedPages);
1610 }
1611
1612 partitionFreeGeneric(genericAllocator.root(), ptr2);
1613 }
1614
1615 TestShutdown();
1616 }
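// For reference: the MockPartitionStatsDumper used throughout this test is
// defined near the top of this file. A stats consumer only has to record what
// the dumper callbacks hand it; a minimal sketch, assuming the
// PartitionStatsDumper virtuals have roughly this shape:
//
//   class VectorBackedStatsDumper final : public PartitionStatsDumper {
//   public:
//       void partitionDumpTotals(const char*, const PartitionMemoryStats*) override { }
//       void partitionsDumpBucketStats(const char*, const PartitionBucketMemoryStats* stats) override
//       {
//           if (stats->isValid)
//               m_stats.append(*stats);
//       }
//
//       Vector<PartitionBucketMemoryStats> m_stats;
//   };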
1617
1618 // Tests the API to purge freeable memory.
1619 TEST(PartitionAllocTest, Purge)
1620 {
1621 TestSetup();
1622
1623 char* ptr = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize, typeName));
1624 partitionFreeGeneric(genericAllocator.root(), ptr);
1625 {
1626 MockPartitionStatsDumper mockStatsDumperGeneric;
1627 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1628 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1629
1630 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1631 EXPECT_TRUE(stats);
1632 EXPECT_TRUE(stats->isValid);
1633 EXPECT_EQ(kSystemPageSize, stats->decommittableBytes);
1634 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1635 }
1636 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
1637 {
1638 MockPartitionStatsDumper mockStatsDumperGeneric;
1639 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1640 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1641
1642 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1643 EXPECT_TRUE(stats);
1644 EXPECT_TRUE(stats->isValid);
1645 EXPECT_EQ(0u, stats->decommittableBytes);
1646 EXPECT_EQ(0u, stats->residentBytes);
1647 }
1648 // Calling purge again here is a good way of testing we didn't mess up the
1649 // state of the free cache ring.
1650 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
1651
1652 char* bigPtr = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 256 * 1024, typeName));
1653 partitionFreeGeneric(genericAllocator.root(), bigPtr);
1654 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
1655
1656 CheckPageInCore(ptr - kPointerOffset, false);
1657 CheckPageInCore(bigPtr - kPointerOffset, false);
1658
1659 TestShutdown();
1660 }
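// Note: the purge flags are defined as bits, so (assuming the bitmask
// contract holds, as the enum naming suggests) a full purge of both empty
// and merely-discardable memory is a single call:
//
//   partitionPurgeMemoryGeneric(genericAllocator.root(),
//       PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages);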
1661
1662 // Tests that we prefer to allocate into a non-empty partition page over an
1663 // empty one. This is an important aspect of minimizing memory usage for some
1664 // allocation sizes, particularly larger ones.
1665 TEST(PartitionAllocTest, PreferActiveOverEmpty)
1666 {
1667 TestSetup();
1668
1669 size_t size = (kSystemPageSize * 2) - kExtraAllocSize;
1670 // Allocate 3 full slot spans worth of 8192-byte allocations.
1671 // Each slot span for this size is 16384 bytes, or 1 partition page and 2
1672 // slots.
1673 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1674 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1675 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1676 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1677 void* ptr5 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1678 void* ptr6 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1679
1680 PartitionPage* page1 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
1681 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3));
1682 PartitionPage* page3 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr6));
1683 EXPECT_NE(page1, page2);
1684 EXPECT_NE(page2, page3);
1685 PartitionBucket* bucket = page1->bucket;
1686 EXPECT_EQ(page3, bucket->activePagesHead);
1687
1688 // Free up the 2nd slot in each slot span.
1689 // This leaves the active list containing 3 pages, each with 1 used and 1
1690 // free slot. The active page will be the one containing ptr1.
1691 partitionFreeGeneric(genericAllocator.root(), ptr6);
1692 partitionFreeGeneric(genericAllocator.root(), ptr4);
1693 partitionFreeGeneric(genericAllocator.root(), ptr2);
1694 EXPECT_EQ(page1, bucket->activePagesHead);
1695
1696 // Empty the middle page in the active list.
1697 partitionFreeGeneric(genericAllocator.root(), ptr3);
1698 EXPECT_EQ(page1, bucket->activePagesHead);
1699
1700 // Empty the first page in the active list -- also the current page.
1701 partitionFreeGeneric(genericAllocator.root(), ptr1);
1702
1703 // A good choice here is to re-fill the third page since the first two are
1704 // empty. We used to fail that.
1705 void* ptr7 = partitionAllocGeneric(genericAllocator.root(), size, typeName);
1706 EXPECT_EQ(ptr6, ptr7);
1707 EXPECT_EQ(page3, bucket->activePagesHead);
1708
1709 partitionFreeGeneric(genericAllocator.root(), ptr5);
1710 partitionFreeGeneric(genericAllocator.root(), ptr7);
1711
1712 TestShutdown();
1713 }
1714
1715 // Tests the API to purge discardable memory.
1716 TEST(PartitionAllocTest, PurgeDiscardable)
1717 {
1718 TestSetup();
1719
1720 // Free the second of two 4096 byte allocations and then purge.
1721 {
1722 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1723 char* ptr2 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName));
1724 partitionFreeGeneric(genericAllocator.root(), ptr2);
1725 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
1726 EXPECT_EQ(2u, page->numUnprovisionedSlots);
1727 {
1728 MockPartitionStatsDumper mockStatsDumperGeneric;
1729 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1730 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1731
1732 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
1733 EXPECT_TRUE(stats);
1734 EXPECT_TRUE(stats->isValid);
1735 EXPECT_EQ(0u, stats->decommittableBytes);
1736 EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
1737 EXPECT_EQ(kSystemPageSize, stats->activeBytes);
1738 EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes);
1739 }
1740 CheckPageInCore(ptr2 - kPointerOffset, true);
1741 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1742 CheckPageInCore(ptr2 - kPointerOffset, false);
1743 EXPECT_EQ(3u, page->numUnprovisionedSlots);
1744
1745 partitionFreeGeneric(genericAllocator.root(), ptr1);
1746 }
1747 // Free the first of two 4096 byte allocations and then purge.
1748 {
1749 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName));
1750 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1751 partitionFreeGeneric(genericAllocator.root(), ptr1);
1752 {
1753 MockPartitionStatsDumper mockStatsDumperGeneric;
1754 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1755 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1756
1757 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
1758 EXPECT_TRUE(stats);
1759 EXPECT_TRUE(stats->isValid);
1760 EXPECT_EQ(0u, stats->decommittableBytes);
1761 EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
1762 EXPECT_EQ(kSystemPageSize, stats->activeBytes);
1763 EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes);
1764 }
1765 CheckPageInCore(ptr1 - kPointerOffset, true);
1766 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1767 CheckPageInCore(ptr1 - kPointerOffset, false);
1768
1769 partitionFreeGeneric(genericAllocator.root(), ptr2);
1770 }
1771 {
1772 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize, typeName));
1773 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize, typeName);
1774 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize, typeName);
1775 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize, typeName);
1776 memset(ptr1, 'A', 9216 - kExtraAllocSize);
1777 memset(ptr2, 'A', 9216 - kExtraAllocSize);
1778 partitionFreeGeneric(genericAllocator.root(), ptr2);
1779 partitionFreeGeneric(genericAllocator.root(), ptr1);
1780 {
1781 MockPartitionStatsDumper mockStatsDumperGeneric;
1782 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1783 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1784
1785 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(9216);
1786 EXPECT_TRUE(stats);
1787 EXPECT_TRUE(stats->isValid);
1788 EXPECT_EQ(0u, stats->decommittableBytes);
1789 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
1790 EXPECT_EQ(9216u * 2, stats->activeBytes);
1791 EXPECT_EQ(9 * kSystemPageSize, stats->residentBytes);
1792 }
1793 CheckPageInCore(ptr1 - kPointerOffset, true);
1794 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
1795 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
1796 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
1797 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
1798 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1799 CheckPageInCore(ptr1 - kPointerOffset, true);
1800 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
1801 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
1802 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
1803 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
1804
1805 partitionFreeGeneric(genericAllocator.root(), ptr3);
1806 partitionFreeGeneric(genericAllocator.root(), ptr4);
1807 }
1808 {
1809 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, typeName));
1810 memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
1811 partitionFreeGeneric(genericAllocator.root(), ptr1);
1812 ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, typeName));
1813 {
1814 MockPartitionStatsDumper mockStatsDumperGeneric;
1815 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1816 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1817
1818 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(64 * kSystemPageSize);
1819 EXPECT_TRUE(stats);
1820 EXPECT_TRUE(stats->isValid);
1821 EXPECT_EQ(0u, stats->decommittableBytes);
1822 EXPECT_EQ(3 * kSystemPageSize, stats->discardableBytes);
1823 EXPECT_EQ(61 * kSystemPageSize, stats->activeBytes);
1824 EXPECT_EQ(64 * kSystemPageSize, stats->residentBytes);
1825 }
1826 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
1827 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
1828 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
1829 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
1830 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1831 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
1832 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
1833 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
1834 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
1835
1836 partitionFreeGeneric(genericAllocator.root(), ptr1);
1837 }
1838 // This sub-test tests truncation of the provisioned slots in a trickier
1839 // case where the freelist is rewritten.
1840 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
1841 {
1842 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName));
1843 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1844 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1845 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1846 ptr1[0] = 'A';
1847 ptr1[kSystemPageSize] = 'A';
1848 ptr1[kSystemPageSize * 2] = 'A';
1849 ptr1[kSystemPageSize * 3] = 'A';
1850 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
1851 partitionFreeGeneric(genericAllocator.root(), ptr2);
1852 partitionFreeGeneric(genericAllocator.root(), ptr4);
1853 partitionFreeGeneric(genericAllocator.root(), ptr1);
1854 EXPECT_EQ(0u, page->numUnprovisionedSlots);
1855
1856 {
1857 MockPartitionStatsDumper mockStatsDumperGeneric;
1858 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1859 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1860
1861 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
1862 EXPECT_TRUE(stats);
1863 EXPECT_TRUE(stats->isValid);
1864 EXPECT_EQ(0u, stats->decommittableBytes);
1865 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
1866 EXPECT_EQ(kSystemPageSize, stats->activeBytes);
1867 EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes);
1868 }
1869 CheckPageInCore(ptr1 - kPointerOffset, true);
1870 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
1871 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
1872 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
1873 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1874 EXPECT_EQ(1u, page->numUnprovisionedSlots);
1875 CheckPageInCore(ptr1 - kPointerOffset, true);
1876 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
1877 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
1878 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
1879
1880 // Let's check we didn't brick the freelist.
1881 void* ptr1b = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1882 EXPECT_EQ(ptr1, ptr1b);
1883 void* ptr2b = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1884 EXPECT_EQ(ptr2, ptr2b);
1885 EXPECT_FALSE(page->freelistHead);
1886
1887 partitionFreeGeneric(genericAllocator.root(), ptr1);
1888 partitionFreeGeneric(genericAllocator.root(), ptr2);
1889 partitionFreeGeneric(genericAllocator.root(), ptr3);
1890 }
1891 // This sub-test is similar, but tests a double-truncation.
1892 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
1893 {
1894 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName));
1895 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1896 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1897 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName);
1898 ptr1[0] = 'A';
1899 ptr1[kSystemPageSize] = 'A';
1900 ptr1[kSystemPageSize * 2] = 'A';
1901 ptr1[kSystemPageSize * 3] = 'A';
1902 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
1903 partitionFreeGeneric(genericAllocator.root(), ptr4);
1904 partitionFreeGeneric(genericAllocator.root(), ptr3);
1905 EXPECT_EQ(0u, page->numUnprovisionedSlots);
1906
1907 {
1908 MockPartitionStatsDumper mockStatsDumperGeneric;
1909 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1910 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1911
1912 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
1913 EXPECT_TRUE(stats);
1914 EXPECT_TRUE(stats->isValid);
1915 EXPECT_EQ(0u, stats->decommittableBytes);
1916 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
1917 EXPECT_EQ(2 * kSystemPageSize, stats->activeBytes);
1918 EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes);
1919 }
1920 CheckPageInCore(ptr1 - kPointerOffset, true);
1921 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
1922 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
1923 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
1924 partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
1925 EXPECT_EQ(2u, page->numUnprovisionedSlots);
1926 CheckPageInCore(ptr1 - kPointerOffset, true);
1927 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
1928 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
1929 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
1930
1931 EXPECT_FALSE(page->freelistHead);
1932
1933 partitionFreeGeneric(genericAllocator.root(), ptr1);
1934 partitionFreeGeneric(genericAllocator.root(), ptr2);
1935 }
1936
1937 TestShutdown();
1938 }
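// The CheckPageInCore() helper leaned on above is defined earlier in this
// file. On POSIX the natural implementation is a mincore() query; a rough,
// Linux-flavored sketch (some platforms, e.g. macOS, type the vector as
// char* instead), assuming ptr is system-page-aligned:
#if OS(POSIX)
static void checkPageInCoreSketch(void* ptr, bool expectInCore)
{
    unsigned char vec = 0;
    // mincore() reports, one byte per system page, whether each page in the
    // queried range is resident in physical memory.
    EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &vec));
    EXPECT_EQ(expectInCore, !!(vec & 1));
}
#endif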
1939
1940 // Tests that the countLeadingZeros() functions work to our satisfaction.
1941 // It doesn't seem worth the overhead of a whole new file for these tests, so
1942 // we'll put them here since partitionAllocGeneric will depend heavily on these
1943 // functions working correctly.
1944 TEST(PartitionAllocTest, CLZWorks)
1945 {
1946 EXPECT_EQ(32u, countLeadingZeros32(0u));
1947 EXPECT_EQ(31u, countLeadingZeros32(1u));
1948 EXPECT_EQ(1u, countLeadingZeros32(1u << 30));
1949 EXPECT_EQ(0u, countLeadingZeros32(1u << 31));
1950
1951 #if CPU(64BIT)
1952 EXPECT_EQ(64u, countLeadingZerosSizet(0ull));
1953 EXPECT_EQ(63u, countLeadingZerosSizet(1ull));
1954 EXPECT_EQ(32u, countLeadingZerosSizet(1ull << 31));
1955 EXPECT_EQ(1u, countLeadingZerosSizet(1ull << 62));
1956 EXPECT_EQ(0u, countLeadingZerosSizet(1ull << 63));
1957 #else
1958 EXPECT_EQ(32u, countLeadingZerosSizet(0u));
1959 EXPECT_EQ(31u, countLeadingZerosSizet(1u));
1960 EXPECT_EQ(1u, countLeadingZerosSizet(1u << 30));
1961 EXPECT_EQ(0u, countLeadingZerosSizet(1u << 31));
1962 #endif
1963 }
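// Illustrative tie-in (not part of this CL): bucketed allocation needs the
// power-of-two "order" of a size in O(1), which is exactly what these
// helpers provide. With a hypothetical helper name:
//
//   size_t orderOfSize(size_t size)
//   {
//       // One plus the index of the highest set bit; 0 for size == 0.
//       return (sizeof(size_t) * 8) - countLeadingZerosSizet(size);
//   }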
1964
1965 } // namespace WTF
1966
1967 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)