Chromium Code Reviews

Side by Side Diff: base/allocator/partition_allocator/partition_alloc_unittest.cc

Issue 2518253002: Move Partition Allocator into Chromium base. (Closed)
Patch Set: EXPECT_DEATH is not supported on iOS; #ifdef it out. Created 4 years ago
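For readers skimming the patch set: the change gates gtest death tests out on platforms whose runtimes cannot support them. A minimal sketch of the pattern, using the same macros as the diff below (the test body here is illustrative only, not part of the CL):

// Sketch: platform-gating a death test, as this patch set does for iOS.
#include <stdlib.h>

#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if !defined(OS_ANDROID) && !defined(OS_IOS)
// EXPECT_DEATH runs the statement in a subprocess and expects it to die;
// per the patch description this is unsupported on iOS, and the review
// thread below notes death tests misbehave on Android as well.
TEST(ExampleDeathTest, AbortDies) {
  EXPECT_DEATH(abort(), "");
}
#endif  // !defined(OS_ANDROID) && !defined(OS_IOS)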
1 /* 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be
3 * 3 // found in the LICENSE file.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30 4
31 #include "wtf/allocator/PartitionAlloc.h" 5 #include "base/allocator/partition_allocator/partition_alloc.h"
32 6
33 #include "testing/gtest/include/gtest/gtest.h"
34 #include "wtf/BitwiseOperations.h"
35 #include "wtf/CPU.h"
36 #include "wtf/PtrUtil.h"
37 #include "wtf/Vector.h"
38 #include <memory>
39 #include <stdlib.h> 7 #include <stdlib.h>
40 #include <string.h> 8 #include <string.h>
41 9
42 #if OS(POSIX) 10 #include <memory>
11 #include <vector>
12
13 #include "base/bits.h"
14 #include "build/build_config.h"
15 #include "testing/gtest/include/gtest/gtest.h"
16
17 #if defined(OS_POSIX)
43 #include <sys/mman.h> 18 #include <sys/mman.h>
44 #include <sys/resource.h> 19 #include <sys/resource.h>
45 #include <sys/time.h> 20 #include <sys/time.h>
46 21
47 #ifndef MAP_ANONYMOUS 22 #ifndef MAP_ANONYMOUS
48 #define MAP_ANONYMOUS MAP_ANON 23 #define MAP_ANONYMOUS MAP_ANON
49 #endif 24 #endif
50 #endif // OS(POSIX) 25 #endif // defined(OS_POSIX)
26
27 namespace {
28 template <typename T>
29 std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
30 return std::unique_ptr<T[]>(ptr);
31 }
32 } // namespace
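A short usage sketch of the WrapArrayUnique helper defined above, which stands in for the wrapArrayUnique from wtf/PtrUtil.h used on the left side; the std::unique_ptr<T[]> array form ensures delete[] rather than delete runs at scope exit:

// Hypothetical usage of the helper (not taken from the CL itself):
std::unique_ptr<int[]> slots = WrapArrayUnique(new int[16]);
slots[0] = 1;  // operator[] is available on the array specialization.
// delete[] is invoked automatically when `slots` goes out of scope.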
51 33
52 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 34 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
53 35
54 namespace WTF { 36 namespace base {
55 37
56 namespace { 38 namespace {
57 39
58 const size_t kTestMaxAllocation = 4096; 40 const size_t kTestMaxAllocation = 4096;
59 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; 41 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
60 PartitionAllocatorGeneric genericAllocator; 42 PartitionAllocatorGeneric genericAllocator;
61 43
62 const size_t kTestAllocSize = 16; 44 const size_t kTestAllocSize = 16;
63 #if !ENABLE(ASSERT) 45 #if !DCHECK_IS_ON()
64 const size_t kPointerOffset = 0; 46 const size_t kPointerOffset = 0;
65 const size_t kExtraAllocSize = 0; 47 const size_t kExtraAllocSize = 0;
66 #else 48 #else
67 const size_t kPointerOffset = WTF::kCookieSize; 49 const size_t kPointerOffset = kCookieSize;
68 const size_t kExtraAllocSize = WTF::kCookieSize * 2; 50 const size_t kExtraAllocSize = kCookieSize * 2;
69 #endif 51 #endif
70 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; 52 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
71 const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift; 53 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift;
72 54
73 const char* typeName = nullptr; 55 const char* typeName = nullptr;
74 56
75 void TestSetup() { 57 void TestSetup() {
76 allocator.init(); 58 allocator.init();
77 genericAllocator.init(); 59 genericAllocator.init();
78 } 60 }
79 61
80 void TestShutdown() { 62 void TestShutdown() {
81 // We expect no leaks in the general case. We have a test for leak 63 // We expect no leaks in the general case. We have a test for leak
82 // detection. 64 // detection.
83 EXPECT_TRUE(allocator.shutdown()); 65 EXPECT_TRUE(allocator.shutdown());
84 EXPECT_TRUE(genericAllocator.shutdown()); 66 EXPECT_TRUE(genericAllocator.shutdown());
85 } 67 }
86 68
87 #if !CPU(64BIT) || OS(POSIX) 69 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
88 bool SetAddressSpaceLimit() { 70 bool SetAddressSpaceLimit() {
89 #if !CPU(64BIT) 71 #if !defined(ARCH_CPU_64_BITS)
90 // 32 bits => address space is limited already. 72 // 32 bits => address space is limited already.
91 return true; 73 return true;
92 #elif OS(POSIX) && !OS(MACOSX) 74 #elif defined(OS_POSIX) && !defined(OS_MACOSX)
93 // Mac will accept RLIMIT_AS changes but does not enforce them. 75 // Mac will accept RLIMIT_AS changes but does not enforce them.
94 // See https://crbug.com/435269 and rdar://17576114. 76 // See https://crbug.com/435269 and rdar://17576114.
95 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024; 77 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024;
96 struct rlimit limit; 78 struct rlimit limit;
97 if (getrlimit(RLIMIT_AS, &limit) != 0) 79 if (getrlimit(RLIMIT_AS, &limit) != 0)
98 return false; 80 return false;
99 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { 81 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
100 limit.rlim_cur = kAddressSpaceLimit; 82 limit.rlim_cur = kAddressSpaceLimit;
101 if (setrlimit(RLIMIT_AS, &limit) != 0) 83 if (setrlimit(RLIMIT_AS, &limit) != 0)
102 return false; 84 return false;
103 } 85 }
104 return true; 86 return true;
105 #else 87 #else
106 return false; 88 return false;
107 #endif 89 #endif
108 } 90 }
109 91
110 bool ClearAddressSpaceLimit() { 92 bool ClearAddressSpaceLimit() {
111 #if !CPU(64BIT) 93 #if !defined(ARCH_CPU_64_BITS)
112 return true; 94 return true;
113 #elif OS(POSIX) 95 #elif defined(OS_POSIX)
114 struct rlimit limit; 96 struct rlimit limit;
115 if (getrlimit(RLIMIT_AS, &limit) != 0) 97 if (getrlimit(RLIMIT_AS, &limit) != 0)
116 return false; 98 return false;
117 limit.rlim_cur = limit.rlim_max; 99 limit.rlim_cur = limit.rlim_max;
118 if (setrlimit(RLIMIT_AS, &limit) != 0) 100 if (setrlimit(RLIMIT_AS, &limit) != 0)
119 return false; 101 return false;
120 return true; 102 return true;
121 #else 103 #else
122 return false; 104 return false;
123 #endif 105 #endif
(...skipping 39 matching lines...)
163 for (i = 0; i < numSlots; ++i) { 145 for (i = 0; i < numSlots; ++i) {
164 partitionFree(ptr + kPointerOffset); 146 partitionFree(ptr + kPointerOffset);
165 ptr += size; 147 ptr += size;
166 } 148 }
167 } 149 }
168 150
169 void CycleFreeCache(size_t size) { 151 void CycleFreeCache(size_t size) {
170 size_t realSize = size + kExtraAllocSize; 152 size_t realSize = size + kExtraAllocSize;
171 size_t bucketIdx = realSize >> kBucketShift; 153 size_t bucketIdx = realSize >> kBucketShift;
172 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; 154 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
173 ASSERT(!bucket->activePagesHead->numAllocatedSlots); 155 DCHECK(!bucket->activePagesHead->numAllocatedSlots);
174 156
175 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { 157 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
176 void* ptr = partitionAlloc(allocator.root(), size, typeName); 158 void* ptr = partitionAlloc(allocator.root(), size, typeName);
177 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); 159 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
178 partitionFree(ptr); 160 partitionFree(ptr);
179 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); 161 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
180 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); 162 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
181 } 163 }
182 } 164 }
183 165
184 void CycleGenericFreeCache(size_t size) { 166 void CycleGenericFreeCache(size_t size) {
185 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { 167 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
186 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); 168 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
187 PartitionPage* page = 169 PartitionPage* page =
188 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); 170 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
189 PartitionBucket* bucket = page->bucket; 171 PartitionBucket* bucket = page->bucket;
190 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); 172 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
191 partitionFreeGeneric(genericAllocator.root(), ptr); 173 partitionFreeGeneric(genericAllocator.root(), ptr);
192 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); 174 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
193 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); 175 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
194 } 176 }
195 } 177 }
196 178
197 void CheckPageInCore(void* ptr, bool inCore) { 179 void CheckPageInCore(void* ptr, bool inCore) {
198 #if OS(LINUX) 180 #if defined(OS_LINUX)
199 unsigned char ret; 181 unsigned char ret;
200 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); 182 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
201 EXPECT_EQ(inCore, ret); 183 EXPECT_EQ(inCore, ret);
202 #endif 184 #endif
203 } 185 }
204 186
205 class MockPartitionStatsDumper : public PartitionStatsDumper { 187 class MockPartitionStatsDumper : public PartitionStatsDumper {
206 public: 188 public:
207 MockPartitionStatsDumper() 189 MockPartitionStatsDumper()
208 : m_totalResidentBytes(0), 190 : m_totalResidentBytes(0),
209 m_totalActiveBytes(0), 191 m_totalActiveBytes(0),
210 m_totalDecommittableBytes(0), 192 m_totalDecommittableBytes(0),
211 m_totalDiscardableBytes(0) {} 193 m_totalDiscardableBytes(0) {}
212 194
213 void partitionDumpTotals(const char* partitionName, 195 void partitionDumpTotals(const char* partitionName,
214 const PartitionMemoryStats* memoryStats) override { 196 const PartitionMemoryStats* memoryStats) override {
215 EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes); 197 EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes);
216 EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes); 198 EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes);
217 EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes); 199 EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes);
218 EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes); 200 EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes);
219 EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes); 201 EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes);
220 } 202 }
221 203
222 void partitionsDumpBucketStats( 204 void partitionsDumpBucketStats(
223 const char* partitionName, 205 const char* partitionName,
224 const PartitionBucketMemoryStats* memoryStats) override { 206 const PartitionBucketMemoryStats* memoryStats) override {
225 (void)partitionName; 207 (void)partitionName;
226 EXPECT_TRUE(memoryStats->isValid); 208 EXPECT_TRUE(memoryStats->isValid);
227 EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask); 209 EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask);
228 m_bucketStats.append(*memoryStats); 210 m_bucketStats.push_back(*memoryStats);
229 m_totalResidentBytes += memoryStats->residentBytes; 211 m_totalResidentBytes += memoryStats->residentBytes;
230 m_totalActiveBytes += memoryStats->activeBytes; 212 m_totalActiveBytes += memoryStats->activeBytes;
231 m_totalDecommittableBytes += memoryStats->decommittableBytes; 213 m_totalDecommittableBytes += memoryStats->decommittableBytes;
232 m_totalDiscardableBytes += memoryStats->discardableBytes; 214 m_totalDiscardableBytes += memoryStats->discardableBytes;
233 } 215 }
234 216
235 bool IsMemoryAllocationRecorded() { 217 bool IsMemoryAllocationRecorded() {
236 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0; 218 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0;
237 } 219 }
238 220
239 const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize) { 221 const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize) {
240 for (size_t i = 0; i < m_bucketStats.size(); ++i) { 222 for (size_t i = 0; i < m_bucketStats.size(); ++i) {
241 if (m_bucketStats[i].bucketSlotSize == bucketSize) 223 if (m_bucketStats[i].bucketSlotSize == bucketSize)
242 return &m_bucketStats[i]; 224 return &m_bucketStats[i];
243 } 225 }
244 return 0; 226 return 0;
245 } 227 }
246 228
247 private: 229 private:
248 size_t m_totalResidentBytes; 230 size_t m_totalResidentBytes;
249 size_t m_totalActiveBytes; 231 size_t m_totalActiveBytes;
250 size_t m_totalDecommittableBytes; 232 size_t m_totalDecommittableBytes;
251 size_t m_totalDiscardableBytes; 233 size_t m_totalDiscardableBytes;
252 234
253 Vector<PartitionBucketMemoryStats> m_bucketStats; 235 std::vector<PartitionBucketMemoryStats> m_bucketStats;
254 }; 236 };
255 237
256 } // anonymous namespace 238 } // anonymous namespace
257 239
258 // Check that the most basic allocate / free pair works. 240 // Check that the most basic allocate / free pair works.
259 TEST(PartitionAllocTest, Basic) { 241 TEST(PartitionAllocTest, Basic) {
260 TestSetup(); 242 TestSetup();
261 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 243 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
262 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; 244 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage;
263 245
(...skipping 182 matching lines...)
446 TEST(PartitionAllocTest, FreePageListPageTransitions) { 428 TEST(PartitionAllocTest, FreePageListPageTransitions) {
447 TestSetup(); 429 TestSetup();
448 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 430 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
449 431
450 size_t numToFillFreeListPage = 432 size_t numToFillFreeListPage =
451 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); 433 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
452 // The +1 is because we need to account for the fact that the current page 434 // The +1 is because we need to account for the fact that the current page
453 // never gets thrown on the freelist. 435 // never gets thrown on the freelist.
454 ++numToFillFreeListPage; 436 ++numToFillFreeListPage;
455 std::unique_ptr<PartitionPage* []> pages = 437 std::unique_ptr<PartitionPage* []> pages =
456 wrapArrayUnique(new PartitionPage*[numToFillFreeListPage]); 438 WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]);
457 439
458 size_t i; 440 size_t i;
459 for (i = 0; i < numToFillFreeListPage; ++i) { 441 for (i = 0; i < numToFillFreeListPage; ++i) {
460 pages[i] = GetFullPage(kTestAllocSize); 442 pages[i] = GetFullPage(kTestAllocSize);
461 } 443 }
462 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); 444 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
463 for (i = 0; i < numToFillFreeListPage; ++i) 445 for (i = 0; i < numToFillFreeListPage; ++i)
464 FreeFullPage(pages[i]); 446 FreeFullPage(pages[i]);
465 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); 447 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
466 EXPECT_TRUE(bucket->emptyPagesHead); 448 EXPECT_TRUE(bucket->emptyPagesHead);
(...skipping 25 matching lines...)
492 TestSetup(); 474 TestSetup();
493 // This is guaranteed to cross a super page boundary because the first 475 // This is guaranteed to cross a super page boundary because the first
494 // partition page "slot" will be taken up by a guard page. 476 // partition page "slot" will be taken up by a guard page.
495 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage; 477 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
496 // The super page should begin and end in a guard so we need one less 478 // The super page should begin and end in a guard so we need one less
497 // page in order to allocate a single page in the new super page. 479 // page in order to allocate a single page in the new super page.
498 --numPagesNeeded; 480 --numPagesNeeded;
499 481
500 EXPECT_GT(numPagesNeeded, 1u); 482 EXPECT_GT(numPagesNeeded, 1u);
501 std::unique_ptr<PartitionPage* []> pages; 483 std::unique_ptr<PartitionPage* []> pages;
502 pages = wrapArrayUnique(new PartitionPage*[numPagesNeeded]); 484 pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]);
503 uintptr_t firstSuperPageBase = 0; 485 uintptr_t firstSuperPageBase = 0;
504 size_t i; 486 size_t i;
505 for (i = 0; i < numPagesNeeded; ++i) { 487 for (i = 0; i < numPagesNeeded; ++i) {
506 pages[i] = GetFullPage(kTestAllocSize); 488 pages[i] = GetFullPage(kTestAllocSize);
507 void* storagePtr = partitionPageToPointer(pages[i]); 489 void* storagePtr = partitionPageToPointer(pages[i]);
508 if (!i) 490 if (!i)
509 firstSuperPageBase = 491 firstSuperPageBase =
510 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; 492 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
511 if (i == numPagesNeeded - 1) { 493 if (i == numPagesNeeded - 1) {
512 uintptr_t secondSuperPageBase = 494 uintptr_t secondSuperPageBase =
(...skipping 40 matching lines...)
553 kGenericSmallestBucket, typeName); 535 kGenericSmallestBucket, typeName);
554 EXPECT_EQ(ptr, newPtr); 536 EXPECT_EQ(ptr, newPtr);
555 537
556 // Change the size of the realloc, switching buckets. 538 // Change the size of the realloc, switching buckets.
557 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 539 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr,
558 kGenericSmallestBucket + 1, typeName); 540 kGenericSmallestBucket + 1, typeName);
559 EXPECT_NE(newPtr, ptr); 541 EXPECT_NE(newPtr, ptr);
560 // Check that the realloc copied correctly. 542 // Check that the realloc copied correctly.
561 char* newCharPtr = static_cast<char*>(newPtr); 543 char* newCharPtr = static_cast<char*>(newPtr);
562 EXPECT_EQ(*newCharPtr, 'A'); 544 EXPECT_EQ(*newCharPtr, 'A');
563 #if ENABLE(ASSERT) 545 #if DCHECK_IS_ON()
564 // Subtle: this checks for an old bug where we copied too much from the 546 // Subtle: this checks for an old bug where we copied too much from the
565 // source of the realloc. The condition can be detected by a trashing of 547 // source of the realloc. The condition can be detected by a trashing of
566 // the uninitialized value in the space of the upsized allocation. 548 // the uninitialized value in the space of the upsized allocation.
567 EXPECT_EQ(kUninitializedByte, 549 EXPECT_EQ(kUninitializedByte,
568 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket))); 550 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket)));
569 #endif 551 #endif
570 *newCharPtr = 'B'; 552 *newCharPtr = 'B';
571 // The realloc moved. To check that the old allocation was freed, we can 553 // The realloc moved. To check that the old allocation was freed, we can
572 // do an alloc of the old allocation size and check that the old allocation 554 // do an alloc of the old allocation size and check that the old allocation
573 // address is at the head of the freelist and reused. 555 // address is at the head of the freelist and reused.
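The comment above describes a reuse check worth spelling out: the freed slot goes to the head of the bucket's freelist, so the very next allocation of the old size should return the old address. A sketch under that assumption, using the same constants as the test above:

// Sketch, not the test's exact code: verify a moved realloc freed its source.
const size_t smallSize = kGenericSmallestBucket;
void* ptr = partitionAllocGeneric(genericAllocator.root(), smallSize, typeName);
void* movedPtr = partitionReallocGeneric(genericAllocator.root(), ptr,
                                         smallSize + 1, typeName);
EXPECT_NE(ptr, movedPtr);  // The realloc moved to a different bucket.
void* reusedPtr =
    partitionAllocGeneric(genericAllocator.root(), smallSize, typeName);
EXPECT_EQ(ptr, reusedPtr);  // Old slot comes back from the freelist head.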
(...skipping 90 matching lines...)
664 partitionFreeGeneric(genericAllocator.root(), ptr3); 646 partitionFreeGeneric(genericAllocator.root(), ptr3);
665 partitionFreeGeneric(genericAllocator.root(), ptr2); 647 partitionFreeGeneric(genericAllocator.root(), ptr2);
666 // Should be freeable at this point. 648 // Should be freeable at this point.
667 EXPECT_NE(-1, page->emptyCacheIndex); 649 EXPECT_NE(-1, page->emptyCacheIndex);
668 EXPECT_EQ(0, page->numAllocatedSlots); 650 EXPECT_EQ(0, page->numAllocatedSlots);
669 EXPECT_EQ(0, page->numUnprovisionedSlots); 651 EXPECT_EQ(0, page->numUnprovisionedSlots);
670 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); 652 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
671 EXPECT_EQ(ptr3, newPtr); 653 EXPECT_EQ(ptr3, newPtr);
672 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); 654 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
673 EXPECT_EQ(ptr2, newPtr); 655 EXPECT_EQ(ptr2, newPtr);
674 #if OS(LINUX) && !ENABLE(ASSERT) 656 #if defined(OS_LINUX) && !DCHECK_IS_ON()
675 // On Linux, we have a guarantee that freelisting a page should cause its 657 // On Linux, we have a guarantee that freelisting a page should cause its
676 // contents to be nulled out. We check for null here to detect a bug we 658 // contents to be nulled out. We check for null here to detect a bug we
677 // had where a large slot size was causing us to not properly free all 659 // had where a large slot size was causing us to not properly free all
678 // resources back to the system. 660 // resources back to the system.
679 // We only run the check when asserts are disabled because when they are 661 // We only run the check when asserts are disabled because when they are
680 // enabled, the allocated area is overwritten with an "uninitialized" 662 // enabled, the allocated area is overwritten with an "uninitialized"
681 // byte pattern. 663 // byte pattern.
682 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); 664 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
683 #endif 665 #endif
684 partitionFreeGeneric(genericAllocator.root(), newPtr); 666 partitionFreeGeneric(genericAllocator.root(), newPtr);
(...skipping 115 matching lines...)
800 size_t size = kSystemPageSize - kExtraAllocSize; 782 size_t size = kSystemPageSize - kExtraAllocSize;
801 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); 783 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
802 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); 784 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
803 memset(ptr, 'A', size); 785 memset(ptr, 'A', size);
804 ptr2 = 786 ptr2 =
805 partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName); 787 partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName);
806 EXPECT_NE(ptr, ptr2); 788 EXPECT_NE(ptr, ptr2);
807 char* charPtr2 = static_cast<char*>(ptr2); 789 char* charPtr2 = static_cast<char*>(ptr2);
808 EXPECT_EQ('A', charPtr2[0]); 790 EXPECT_EQ('A', charPtr2[0]);
809 EXPECT_EQ('A', charPtr2[size - 1]); 791 EXPECT_EQ('A', charPtr2[size - 1]);
810 #if ENABLE(ASSERT) 792 #if DCHECK_IS_ON()
811 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size])); 793 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
812 #endif 794 #endif
813 795
814 // Test that shrinking an allocation with realloc() also copies everything 796 // Test that shrinking an allocation with realloc() also copies everything
815 // from the old allocation. 797 // from the old allocation.
816 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, 798 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1,
817 typeName); 799 typeName);
818 EXPECT_NE(ptr2, ptr); 800 EXPECT_NE(ptr2, ptr);
819 char* charPtr = static_cast<char*>(ptr); 801 char* charPtr = static_cast<char*>(ptr);
820 EXPECT_EQ('A', charPtr[0]); 802 EXPECT_EQ('A', charPtr[0]);
821 EXPECT_EQ('A', charPtr[size - 2]); 803 EXPECT_EQ('A', charPtr[size - 2]);
822 #if ENABLE(ASSERT) 804 #if DCHECK_IS_ON()
823 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); 805 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
824 #endif 806 #endif
825 807
826 partitionFreeGeneric(genericAllocator.root(), ptr); 808 partitionFreeGeneric(genericAllocator.root(), ptr);
827 809
828 // Test that shrinking a direct mapped allocation happens in-place. 810 // Test that shrinking a direct mapped allocation happens in-place.
829 size = kGenericMaxBucketed + 16 * kSystemPageSize; 811 size = kGenericMaxBucketed + 16 * kSystemPageSize;
830 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); 812 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName);
831 size_t actualSize = partitionAllocGetSize(ptr); 813 size_t actualSize = partitionAllocGetSize(ptr);
832 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 814 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr,
(...skipping 233 matching lines...)
1066 TestShutdown(); 1048 TestShutdown();
1067 } 1049 }
1068 1050
1069 // Test correct handling if our mapping collides with another. 1051 // Test correct handling if our mapping collides with another.
1070 TEST(PartitionAllocTest, MappingCollision) { 1052 TEST(PartitionAllocTest, MappingCollision) {
1071 TestSetup(); 1053 TestSetup();
1072 // The -2 is because the first and last partition pages in a super page are 1054 // The -2 is because the first and last partition pages in a super page are
1073 // guard pages. 1055 // guard pages.
1074 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2; 1056 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
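For orientation, the layout that makes the -2 above (and the "one less page" comment earlier in the file) necessary, sketched from the comments in this file:

// A super page is bounded by guard partition pages at both ends:
//
//   [guard][partition page 1] ... [partition page N-2][guard]
//
// so only kNumPartitionPagesPerSuperPage - 2 partition pages are usable.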
1075 std::unique_ptr<PartitionPage* []> firstSuperPagePages = 1057 std::unique_ptr<PartitionPage* []> firstSuperPagePages =
1076 wrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); 1058 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
1077 std::unique_ptr<PartitionPage* []> secondSuperPagePages = 1059 std::unique_ptr<PartitionPage* []> secondSuperPagePages =
1078 wrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); 1060 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
1079 1061
1080 size_t i; 1062 size_t i;
1081 for (i = 0; i < numPartitionPagesNeeded; ++i) 1063 for (i = 0; i < numPartitionPagesNeeded; ++i)
1082 firstSuperPagePages[i] = GetFullPage(kTestAllocSize); 1064 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
1083 1065
1084 char* pageBase = 1066 char* pageBase =
1085 reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0])); 1067 reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0]));
1086 EXPECT_EQ(kPartitionPageSize, 1068 EXPECT_EQ(kPartitionPageSize,
1087 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); 1069 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
1088 pageBase -= kPartitionPageSize; 1070 pageBase -= kPartitionPageSize;
(...skipping 170 matching lines...)
1259 EXPECT_TRUE(ptr); 1241 EXPECT_TRUE(ptr);
1260 partitionFreeGeneric(genericAllocator.root(), ptr); 1242 partitionFreeGeneric(genericAllocator.root(), ptr);
1261 1243
1262 EXPECT_TRUE(bucket->activePagesHead); 1244 EXPECT_TRUE(bucket->activePagesHead);
1263 EXPECT_TRUE(bucket->emptyPagesHead); 1245 EXPECT_TRUE(bucket->emptyPagesHead);
1264 EXPECT_TRUE(bucket->decommittedPagesHead); 1246 EXPECT_TRUE(bucket->decommittedPagesHead);
1265 1247
1266 TestShutdown(); 1248 TestShutdown();
1267 } 1249 }
1268 1250
1269 #if !CPU(64BIT) || OS(POSIX) 1251 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
1270 1252
1271 static void DoReturnNullTest(size_t allocSize) { 1253 static void DoReturnNullTest(size_t allocSize) {
1272 TestSetup(); 1254 TestSetup();
1273 1255
1274 EXPECT_TRUE(SetAddressSpaceLimit()); 1256 EXPECT_TRUE(SetAddressSpaceLimit());
1275 1257
1276 // Work out the number of allocations for 6 GB of memory. 1258 // Work out the number of allocations for 6 GB of memory.
1277 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); 1259 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
1278 1260
1279 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric( 1261 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric(
(...skipping 32 matching lines...)
1312 1294
1313 EXPECT_TRUE(ClearAddressSpaceLimit()); 1295 EXPECT_TRUE(ClearAddressSpaceLimit());
1314 1296
1315 TestShutdown(); 1297 TestShutdown();
1316 } 1298 }
1317 1299
1318 // Tests that if an allocation fails in "return null" mode, repeating it doesn't 1300 // Tests that if an allocation fails in "return null" mode, repeating it doesn't
1319 // crash, and still returns null. The test tries to allocate 6 GB of memory in 1301 // crash, and still returns null. The test tries to allocate 6 GB of memory in
1320 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB 1302 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB
1321 // using setrlimit() first. 1303 // using setrlimit() first.
1322 #if OS(MACOSX) 1304 #if defined(OS_MACOSX)
1323 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull 1305 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull
1324 #else 1306 #else
1325 #define MAYBE_RepeatedReturnNull RepeatedReturnNull 1307 #define MAYBE_RepeatedReturnNull RepeatedReturnNull
1326 #endif 1308 #endif
1327 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) { 1309 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) {
1328 // A single-slot but non-direct-mapped allocation size. 1310 // A single-slot but non-direct-mapped allocation size.
1329 DoReturnNullTest(512 * 1024); 1311 DoReturnNullTest(512 * 1024);
1330 } 1312 }
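As a sanity check on DoReturnNullTest's arithmetic: numAllocations is computed in kilobyte units so the 32-bit int math cannot overflow. For the 512 kB case above:

// (6 * 1024 * 1024) kB of target memory, (512 * 1024) / 1024 = 512 kB per
// block; 12288 blocks of 512 kB is exactly 6 GB.
static_assert((6 * 1024 * 1024) / ((512 * 1024) / 1024) == 12288,
              "6 GB in 512 kB blocks is 12288 allocations");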
1331 1313
1332 // Another "return null" test but for larger, direct-mapped allocations. 1314 // Another "return null" test but for larger, direct-mapped allocations.
1333 #if OS(MACOSX) 1315 #if defined(OS_MACOSX)
1334 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect 1316 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
1335 #else 1317 #else
1336 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect 1318 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
1337 #endif 1319 #endif
1338 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) { 1320 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) {
1339 // A direct-mapped allocation size. 1321 // A direct-mapped allocation size.
1340 DoReturnNullTest(256 * 1024 * 1024); 1322 DoReturnNullTest(256 * 1024 * 1024);
1341 } 1323 }
1342 1324
1343 #endif // !CPU(64BIT) || OS(POSIX) 1325 #endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
1344 1326
1345 #if !OS(ANDROID) 1327 #if !defined(OS_ANDROID) && !defined(OS_IOS)
gab 2016/12/07 15:34:56 Drive-by: please use // Death tests misbehave on
palmer 2016/12/07 19:00:13 Done.
gab 2016/12/07 20:40:48 Actually, I don't see this change, missing upload?
1346 1328
1347 // Make sure that malloc(-1) dies. 1329 // Make sure that malloc(-1) dies.
1348 // In the past, we had an integer overflow that would alias malloc(-1) to 1330 // In the past, we had an integer overflow that would alias malloc(-1) to
1349 // malloc(0), which is not good. 1331 // malloc(0), which is not good.
1350 TEST(PartitionAllocDeathTest, LargeAllocs) { 1332 TEST(PartitionAllocDeathTest, LargeAllocs) {
1351 TestSetup(); 1333 TestSetup();
1352 // Largest alloc. 1334 // Largest alloc.
1353 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), 1335 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(),
1354 static_cast<size_t>(-1), typeName), 1336 static_cast<size_t>(-1), typeName),
1355 ""); 1337 "");
(...skipping 89 matching lines...)
1445 EXPECT_TRUE(ptr); 1427 EXPECT_TRUE(ptr);
1446 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; 1428 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
1447 1429
1448 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); 1430 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), "");
1449 1431
1450 partitionFreeGeneric(genericAllocator.root(), ptr); 1432 partitionFreeGeneric(genericAllocator.root(), ptr);
1451 1433
1452 TestShutdown(); 1434 TestShutdown();
1453 } 1435 }
1454 1436
1455 #endif // !OS(ANDROID) 1437 #endif // !defined(OS_ANDROID) && !defined(OS_IOS)
1456 1438
1457 // Tests that partitionDumpStatsGeneric and partitionDumpStats run without 1439 // Tests that partitionDumpStatsGeneric and partitionDumpStats run without
1458 // crashing and return non-zero values when memory is allocated. 1440 // crashing and return non-zero values when memory is allocated.
1459 TEST(PartitionAllocTest, DumpMemoryStats) { 1441 TEST(PartitionAllocTest, DumpMemoryStats) {
1460 TestSetup(); 1442 TestSetup();
1461 { 1443 {
1462 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); 1444 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName);
1463 MockPartitionStatsDumper mockStatsDumper; 1445 MockPartitionStatsDumper mockStatsDumper;
1464 partitionDumpStats(allocator.root(), "mock_allocator", 1446 partitionDumpStats(allocator.root(), "mock_allocator",
1465 false /* detailed dump */, &mockStatsDumper); 1447 false /* detailed dump */, &mockStatsDumper);
(...skipping 640 matching lines...)
2106 2088
2107 EXPECT_FALSE(page->freelistHead); 2089 EXPECT_FALSE(page->freelistHead);
2108 2090
2109 partitionFreeGeneric(genericAllocator.root(), ptr1); 2091 partitionFreeGeneric(genericAllocator.root(), ptr1);
2110 partitionFreeGeneric(genericAllocator.root(), ptr2); 2092 partitionFreeGeneric(genericAllocator.root(), ptr2);
2111 } 2093 }
2112 2094
2113 TestShutdown(); 2095 TestShutdown();
2114 } 2096 }
2115 2097
2116 } // namespace WTF 2098 TEST(PartitionAllocTest, CLZWorks) {
2099 EXPECT_EQ(32u, bits::CountLeadingZeroBits32(0u));
2100 EXPECT_EQ(31u, bits::CountLeadingZeroBits32(1u));
2101 EXPECT_EQ(1u, bits::CountLeadingZeroBits32(1u << 30));
2102 EXPECT_EQ(0u, bits::CountLeadingZeroBits32(1u << 31));
2103
2104 #if defined(ARCH_CPU_64_BITS)
2105 EXPECT_EQ(64u, bits::CountLeadingZeroBitsSizeT(0ull));
2106 EXPECT_EQ(63u, bits::CountLeadingZeroBitsSizeT(1ull));
2107 EXPECT_EQ(32u, bits::CountLeadingZeroBitsSizeT(1ull << 31));
2108 EXPECT_EQ(1u, bits::CountLeadingZeroBitsSizeT(1ull << 62));
2109 EXPECT_EQ(0u, bits::CountLeadingZeroBitsSizeT(1ull << 63));
2110 #else
2111 EXPECT_EQ(32u, bits::CountLeadingZeroBitsSizeT(0u));
2112 EXPECT_EQ(31u, bits::CountLeadingZeroBitsSizeT(1u));
2113 EXPECT_EQ(1u, bits::CountLeadingZeroBitsSizeT(1u << 30));
2114 EXPECT_EQ(0u, bits::CountLeadingZeroBitsSizeT(1u << 31));
2115 #endif
2116 }
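One aside on the new base/bits dependency exercised above: for x > 0, 31 - CountLeadingZeroBits32(x) equals floor(log2(x)). That relationship is offered as illustration; the diff itself only shows the test:

// Sketch: floor(log2(x)) from the leading-zero count, valid for x > 0.
uint32_t FloorLog2(uint32_t x) {
  return 31u - bits::CountLeadingZeroBits32(x);
}
// FloorLog2(1) == 0; FloorLog2(1u << 30) == 30.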
2117
2118 } // namespace base
2117 2119
2118 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 2120 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)