OLD | NEW |
---|---|
1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
3 * | 3 // found in the LICENSE file. |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions are | |
6 * met: | |
7 * | |
8 * * Redistributions of source code must retain the above copyright | |
9 * notice, this list of conditions and the following disclaimer. | |
10 * * Redistributions in binary form must reproduce the above | |
11 * copyright notice, this list of conditions and the following disclaimer | |
12 * in the documentation and/or other materials provided with the | |
13 * distribution. | |
14 * * Neither the name of Google Inc. nor the names of its | |
15 * contributors may be used to endorse or promote products derived from | |
16 * this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 */ | |
30 | 4 |
31 #include "wtf/allocator/PartitionAlloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
32 | 6 |
33 #include "testing/gtest/include/gtest/gtest.h" | |
34 #include "wtf/BitwiseOperations.h" | |
35 #include "wtf/CPU.h" | |
36 #include "wtf/PtrUtil.h" | |
37 #include "wtf/Vector.h" | |
38 #include <memory> | |
39 #include <stdlib.h> | 7 #include <stdlib.h> |
40 #include <string.h> | 8 #include <string.h> |
41 | 9 |
42 #if OS(POSIX) | 10 #include <memory> |
11 | |
12 #include "base/bits.h" | |
Primiano Tucci (use gerrit)
2016/11/28 12:06:50
+build_config.h
palmer
2016/12/01 00:48:24
Done.
| |
13 #include "testing/gtest/include/gtest/gtest.h" | |
14 | |
15 #if defined(OS_POSIX) | |
43 #include <sys/mman.h> | 16 #include <sys/mman.h> |
44 #include <sys/resource.h> | 17 #include <sys/resource.h> |
45 #include <sys/time.h> | 18 #include <sys/time.h> |
46 | 19 |
47 #ifndef MAP_ANONYMOUS | 20 #ifndef MAP_ANONYMOUS |
48 #define MAP_ANONYMOUS MAP_ANON | 21 #define MAP_ANONYMOUS MAP_ANON |
49 #endif | 22 #endif |
50 #endif // OS(POSIX) | 23 #endif // defined(OS_POSIX) |
51 | 24 |
52 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 25 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
53 | 26 |
54 namespace WTF { | 27 namespace base { |
55 | 28 |
56 namespace { | 29 namespace { |
57 | 30 |
31 // TODO(palmer): Remove these if possible. | |
Primiano Tucci (use gerrit)
2016/11/28 12:06:50
Well they seem already commented out. Removing the
palmer
2016/12/01 00:48:24
Done.
| |
32 // using base::SizeSpecificPartitionAllocator; | |
33 // using base::PartitionAllocatorGeneric; | |
34 // using base::PartitionRoot; | |
35 // using base::partitionAllocInit; | |
36 // using base::partitionAllocShutdown; | |
37 // using base::partitionAlloc; | |
38 // using base::partitionFree; | |
39 // using base::partitionAllocGeneric; | |
40 // using base::partitionFreeGeneric; | |
41 // using base::partitionReallocGeneric; | |
42 // using base::partitionAllocActualSize; | |
43 // using base::partitionAllocSupportsGetSize; | |
44 // using base::partitionAllocGetSize; | |
45 | |
58 const size_t kTestMaxAllocation = 4096; | 46 const size_t kTestMaxAllocation = 4096; |
59 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; | 47 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; |
60 PartitionAllocatorGeneric genericAllocator; | 48 PartitionAllocatorGeneric genericAllocator; |
61 | 49 |
62 const size_t kTestAllocSize = 16; | 50 const size_t kTestAllocSize = 16; |
63 #if !ENABLE(ASSERT) | 51 #if !DCHECK_IS_ON() |
64 const size_t kPointerOffset = 0; | 52 const size_t kPointerOffset = 0; |
65 const size_t kExtraAllocSize = 0; | 53 const size_t kExtraAllocSize = 0; |
66 #else | 54 #else |
67 const size_t kPointerOffset = WTF::kCookieSize; | 55 const size_t kPointerOffset = kCookieSize; |
68 const size_t kExtraAllocSize = WTF::kCookieSize * 2; | 56 const size_t kExtraAllocSize = kCookieSize * 2; |
69 #endif | 57 #endif |
70 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; | 58 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; |
71 const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift; | 59 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; |
72 | 60 |
73 const char* typeName = nullptr; | 61 const char* typeName = nullptr; |
74 | 62 |
75 void TestSetup() { | 63 void TestSetup() { |
76 allocator.init(); | 64 allocator.init(); |
77 genericAllocator.init(); | 65 genericAllocator.init(); |
78 } | 66 } |
79 | 67 |
80 void TestShutdown() { | 68 void TestShutdown() { |
81 // We expect no leaks in the general case. We have a test for leak | 69 // We expect no leaks in the general case. We have a test for leak |
82 // detection. | 70 // detection. |
83 EXPECT_TRUE(allocator.shutdown()); | 71 EXPECT_TRUE(allocator.shutdown()); |
84 EXPECT_TRUE(genericAllocator.shutdown()); | 72 EXPECT_TRUE(genericAllocator.shutdown()); |
85 } | 73 } |
86 | 74 |
87 #if !CPU(64BIT) || OS(POSIX) | 75 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
88 bool SetAddressSpaceLimit() { | 76 bool SetAddressSpaceLimit() { |
89 #if !CPU(64BIT) | 77 #if !defined(ARCH_CPU_64_BITS) |
90 // 32 bits => address space is limited already. | 78 // 32 bits => address space is limited already. |
91 return true; | 79 return true; |
92 #elif OS(POSIX) && !OS(MACOSX) | 80 #elif defined(OS_POSIX) && !defined(OS_MACOSX) |
93 // Mac will accept RLIMIT_AS changes but it is not enforced. | 81 // Mac will accept RLIMIT_AS changes but it is not enforced. |
94 // See https://crbug.com/435269 and rdar://17576114. | 82 // See https://crbug.com/435269 and rdar://17576114. |
95 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024; | 83 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024; |
96 struct rlimit limit; | 84 struct rlimit limit; |
97 if (getrlimit(RLIMIT_AS, &limit) != 0) | 85 if (getrlimit(RLIMIT_AS, &limit) != 0) |
98 return false; | 86 return false; |
99 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { | 87 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { |
100 limit.rlim_cur = kAddressSpaceLimit; | 88 limit.rlim_cur = kAddressSpaceLimit; |
101 if (setrlimit(RLIMIT_AS, &limit) != 0) | 89 if (setrlimit(RLIMIT_AS, &limit) != 0) |
102 return false; | 90 return false; |
103 } | 91 } |
104 return true; | 92 return true; |
105 #else | 93 #else |
106 return false; | 94 return false; |
107 #endif | 95 #endif |
108 } | 96 } |
109 | 97 |
110 bool ClearAddressSpaceLimit() { | 98 bool ClearAddressSpaceLimit() { |
111 #if !CPU(64BIT) | 99 #if !defined(ARCH_CPU_64_BITS) |
112 return true; | 100 return true; |
113 #elif OS(POSIX) | 101 #elif defined(OS_POSIX) |
114 struct rlimit limit; | 102 struct rlimit limit; |
115 if (getrlimit(RLIMIT_AS, &limit) != 0) | 103 if (getrlimit(RLIMIT_AS, &limit) != 0) |
116 return false; | 104 return false; |
117 limit.rlim_cur = limit.rlim_max; | 105 limit.rlim_cur = limit.rlim_max; |
118 if (setrlimit(RLIMIT_AS, &limit) != 0) | 106 if (setrlimit(RLIMIT_AS, &limit) != 0) |
119 return false; | 107 return false; |
120 return true; | 108 return true; |
121 #else | 109 #else |
122 return false; | 110 return false; |
123 #endif | 111 #endif |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
163 for (i = 0; i < numSlots; ++i) { | 151 for (i = 0; i < numSlots; ++i) { |
164 partitionFree(ptr + kPointerOffset); | 152 partitionFree(ptr + kPointerOffset); |
165 ptr += size; | 153 ptr += size; |
166 } | 154 } |
167 } | 155 } |
168 | 156 |
169 void CycleFreeCache(size_t size) { | 157 void CycleFreeCache(size_t size) { |
170 size_t realSize = size + kExtraAllocSize; | 158 size_t realSize = size + kExtraAllocSize; |
171 size_t bucketIdx = realSize >> kBucketShift; | 159 size_t bucketIdx = realSize >> kBucketShift; |
172 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; | 160 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; |
173 ASSERT(!bucket->activePagesHead->numAllocatedSlots); | 161 DCHECK(!bucket->activePagesHead->numAllocatedSlots); |
174 | 162 |
175 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 163 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
176 void* ptr = partitionAlloc(allocator.root(), size, typeName); | 164 void* ptr = partitionAlloc(allocator.root(), size, typeName); |
177 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); | 165 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); |
178 partitionFree(ptr); | 166 partitionFree(ptr); |
179 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); | 167 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); |
180 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); | 168 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); |
181 } | 169 } |
182 } | 170 } |
183 | 171 |
184 void CycleGenericFreeCache(size_t size) { | 172 void CycleGenericFreeCache(size_t size) { |
185 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 173 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
186 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 174 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); |
187 PartitionPage* page = | 175 PartitionPage* page = |
188 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 176 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); |
189 PartitionBucket* bucket = page->bucket; | 177 PartitionBucket* bucket = page->bucket; |
190 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); | 178 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); |
191 partitionFreeGeneric(genericAllocator.root(), ptr); | 179 partitionFreeGeneric(genericAllocator.root(), ptr); |
192 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); | 180 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); |
193 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); | 181 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); |
194 } | 182 } |
195 } | 183 } |
196 | 184 |
197 void CheckPageInCore(void* ptr, bool inCore) { | 185 void CheckPageInCore(void* ptr, bool inCore) { |
198 #if OS(LINUX) | 186 #if defined(OS_LINUX) |
199 unsigned char ret; | 187 unsigned char ret; |
200 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); | 188 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); |
201 EXPECT_EQ(inCore, ret); | 189 EXPECT_EQ(inCore, ret); |
202 #endif | 190 #endif |
203 } | 191 } |
204 | 192 |
205 class MockPartitionStatsDumper : public PartitionStatsDumper { | 193 class MockPartitionStatsDumper : public PartitionStatsDumper { |
206 public: | 194 public: |
207 MockPartitionStatsDumper() | 195 MockPartitionStatsDumper() |
208 : m_totalResidentBytes(0), | 196 : m_totalResidentBytes(0), |
(...skipping 344 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
553 kGenericSmallestBucket, typeName); | 541 kGenericSmallestBucket, typeName); |
554 EXPECT_EQ(ptr, newPtr); | 542 EXPECT_EQ(ptr, newPtr); |
555 | 543 |
556 // Change the size of the realloc, switching buckets. | 544 // Change the size of the realloc, switching buckets. |
557 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 545 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, |
558 kGenericSmallestBucket + 1, typeName); | 546 kGenericSmallestBucket + 1, typeName); |
559 EXPECT_NE(newPtr, ptr); | 547 EXPECT_NE(newPtr, ptr); |
560 // Check that the realloc copied correctly. | 548 // Check that the realloc copied correctly. |
561 char* newCharPtr = static_cast<char*>(newPtr); | 549 char* newCharPtr = static_cast<char*>(newPtr); |
562 EXPECT_EQ(*newCharPtr, 'A'); | 550 EXPECT_EQ(*newCharPtr, 'A'); |
563 #if ENABLE(ASSERT) | 551 #if DCHECK_IS_ON() |
564 // Subtle: this checks for an old bug where we copied too much from the | 552 // Subtle: this checks for an old bug where we copied too much from the |
565 // source of the realloc. The condition can be detected by a trashing of | 553 // source of the realloc. The condition can be detected by a trashing of |
566 // the uninitialized value in the space of the upsized allocation. | 554 // the uninitialized value in the space of the upsized allocation. |
567 EXPECT_EQ(kUninitializedByte, | 555 EXPECT_EQ(kUninitializedByte, |
568 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket))); | 556 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket))); |
569 #endif | 557 #endif |
570 *newCharPtr = 'B'; | 558 *newCharPtr = 'B'; |
571 // The realloc moved. To check that the old allocation was freed, we can | 559 // The realloc moved. To check that the old allocation was freed, we can |
572 // do an alloc of the old allocation size and check that the old allocation | 560 // do an alloc of the old allocation size and check that the old allocation |
573 // address is at the head of the freelist and reused. | 561 // address is at the head of the freelist and reused. |
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
664 partitionFreeGeneric(genericAllocator.root(), ptr3); | 652 partitionFreeGeneric(genericAllocator.root(), ptr3); |
665 partitionFreeGeneric(genericAllocator.root(), ptr2); | 653 partitionFreeGeneric(genericAllocator.root(), ptr2); |
666 // Should be freeable at this point. | 654 // Should be freeable at this point. |
667 EXPECT_NE(-1, page->emptyCacheIndex); | 655 EXPECT_NE(-1, page->emptyCacheIndex); |
668 EXPECT_EQ(0, page->numAllocatedSlots); | 656 EXPECT_EQ(0, page->numAllocatedSlots); |
669 EXPECT_EQ(0, page->numUnprovisionedSlots); | 657 EXPECT_EQ(0, page->numUnprovisionedSlots); |
670 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 658 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); |
671 EXPECT_EQ(ptr3, newPtr); | 659 EXPECT_EQ(ptr3, newPtr); |
672 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 660 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); |
673 EXPECT_EQ(ptr2, newPtr); | 661 EXPECT_EQ(ptr2, newPtr); |
674 #if OS(LINUX) && !ENABLE(ASSERT) | 662 #if defined(OS_LINUX) && !DCHECK_IS_ON() |
675 // On Linux, we have a guarantee that freelisting a page should cause its | 663 // On Linux, we have a guarantee that freelisting a page should cause its |
676 // contents to be nulled out. We check for null here to detect an bug we | 664 // contents to be nulled out. We check for null here to detect an bug we |
677 // had where a large slot size was causing us to not properly free all | 665 // had where a large slot size was causing us to not properly free all |
678 // resources back to the system. | 666 // resources back to the system. |
679 // We only run the check when asserts are disabled because when they are | 667 // We only run the check when asserts are disabled because when they are |
680 // enabled, the allocated area is overwritten with an "uninitialized" | 668 // enabled, the allocated area is overwritten with an "uninitialized" |
681 // byte pattern. | 669 // byte pattern. |
682 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); | 670 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); |
683 #endif | 671 #endif |
684 partitionFreeGeneric(genericAllocator.root(), newPtr); | 672 partitionFreeGeneric(genericAllocator.root(), newPtr); |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
800 size_t size = kSystemPageSize - kExtraAllocSize; | 788 size_t size = kSystemPageSize - kExtraAllocSize; |
801 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); | 789 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); |
802 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 790 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); |
803 memset(ptr, 'A', size); | 791 memset(ptr, 'A', size); |
804 ptr2 = | 792 ptr2 = |
805 partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName); | 793 partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName); |
806 EXPECT_NE(ptr, ptr2); | 794 EXPECT_NE(ptr, ptr2); |
807 char* charPtr2 = static_cast<char*>(ptr2); | 795 char* charPtr2 = static_cast<char*>(ptr2); |
808 EXPECT_EQ('A', charPtr2[0]); | 796 EXPECT_EQ('A', charPtr2[0]); |
809 EXPECT_EQ('A', charPtr2[size - 1]); | 797 EXPECT_EQ('A', charPtr2[size - 1]); |
810 #if ENABLE(ASSERT) | 798 #if DCHECK_IS_ON() |
811 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size])); | 799 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size])); |
812 #endif | 800 #endif |
813 | 801 |
814 // Test that shrinking an allocation with realloc() also copies everything | 802 // Test that shrinking an allocation with realloc() also copies everything |
815 // from the old allocation. | 803 // from the old allocation. |
816 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, | 804 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, |
817 typeName); | 805 typeName); |
818 EXPECT_NE(ptr2, ptr); | 806 EXPECT_NE(ptr2, ptr); |
819 char* charPtr = static_cast<char*>(ptr); | 807 char* charPtr = static_cast<char*>(ptr); |
820 EXPECT_EQ('A', charPtr[0]); | 808 EXPECT_EQ('A', charPtr[0]); |
821 EXPECT_EQ('A', charPtr[size - 2]); | 809 EXPECT_EQ('A', charPtr[size - 2]); |
822 #if ENABLE(ASSERT) | 810 #if DCHECK_IS_ON() |
823 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); | 811 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); |
824 #endif | 812 #endif |
825 | 813 |
826 partitionFreeGeneric(genericAllocator.root(), ptr); | 814 partitionFreeGeneric(genericAllocator.root(), ptr); |
827 | 815 |
828 // Test that shrinking a direct mapped allocation happens in-place. | 816 // Test that shrinking a direct mapped allocation happens in-place. |
829 size = kGenericMaxBucketed + 16 * kSystemPageSize; | 817 size = kGenericMaxBucketed + 16 * kSystemPageSize; |
830 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 818 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); |
831 size_t actualSize = partitionAllocGetSize(ptr); | 819 size_t actualSize = partitionAllocGetSize(ptr); |
832 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, | 820 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, |
(...skipping 426 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1259 EXPECT_TRUE(ptr); | 1247 EXPECT_TRUE(ptr); |
1260 partitionFreeGeneric(genericAllocator.root(), ptr); | 1248 partitionFreeGeneric(genericAllocator.root(), ptr); |
1261 | 1249 |
1262 EXPECT_TRUE(bucket->activePagesHead); | 1250 EXPECT_TRUE(bucket->activePagesHead); |
1263 EXPECT_TRUE(bucket->emptyPagesHead); | 1251 EXPECT_TRUE(bucket->emptyPagesHead); |
1264 EXPECT_TRUE(bucket->decommittedPagesHead); | 1252 EXPECT_TRUE(bucket->decommittedPagesHead); |
1265 | 1253 |
1266 TestShutdown(); | 1254 TestShutdown(); |
1267 } | 1255 } |
1268 | 1256 |
1269 #if !CPU(64BIT) || OS(POSIX) | 1257 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
1270 | 1258 |
1271 static void DoReturnNullTest(size_t allocSize) { | 1259 static void DoReturnNullTest(size_t allocSize) { |
1272 TestSetup(); | 1260 TestSetup(); |
1273 | 1261 |
1274 EXPECT_TRUE(SetAddressSpaceLimit()); | 1262 EXPECT_TRUE(SetAddressSpaceLimit()); |
1275 | 1263 |
1276 // Work out the number of allocations for 6 GB of memory. | 1264 // Work out the number of allocations for 6 GB of memory. |
1277 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); | 1265 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); |
1278 | 1266 |
1279 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric( | 1267 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric( |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1312 | 1300 |
1313 EXPECT_TRUE(ClearAddressSpaceLimit()); | 1301 EXPECT_TRUE(ClearAddressSpaceLimit()); |
1314 | 1302 |
1315 TestShutdown(); | 1303 TestShutdown(); |
1316 } | 1304 } |
1317 | 1305 |
1318 // Tests that if an allocation fails in "return null" mode, repeating it doesn't | 1306 // Tests that if an allocation fails in "return null" mode, repeating it doesn't |
1319 // crash, and still returns null. The test tries to allocate 6 GB of memory in | 1307 // crash, and still returns null. The test tries to allocate 6 GB of memory in |
1320 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB | 1308 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB |
1321 // using setrlimit() first. | 1309 // using setrlimit() first. |
1322 #if OS(MACOSX) | 1310 #if defined(OS_MACOSX) |
1323 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull | 1311 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull |
1324 #else | 1312 #else |
1325 #define MAYBE_RepeatedReturnNull RepeatedReturnNull | 1313 #define MAYBE_RepeatedReturnNull RepeatedReturnNull |
1326 #endif | 1314 #endif |
1327 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) { | 1315 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) { |
1328 // A single-slot but non-direct-mapped allocation size. | 1316 // A single-slot but non-direct-mapped allocation size. |
1329 DoReturnNullTest(512 * 1024); | 1317 DoReturnNullTest(512 * 1024); |
1330 } | 1318 } |
1331 | 1319 |
1332 // Another "return null" test but for larger, direct-mapped allocations. | 1320 // Another "return null" test but for larger, direct-mapped allocations. |
1333 #if OS(MACOSX) | 1321 #if defined(OS_MACOSX) |
1334 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect | 1322 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect |
1335 #else | 1323 #else |
1336 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect | 1324 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect |
1337 #endif | 1325 #endif |
1338 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) { | 1326 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) { |
1339 // A direct-mapped allocation size. | 1327 // A direct-mapped allocation size. |
1340 DoReturnNullTest(256 * 1024 * 1024); | 1328 DoReturnNullTest(256 * 1024 * 1024); |
1341 } | 1329 } |
1342 | 1330 |
1343 #endif // !CPU(64BIT) || OS(POSIX) | 1331 #endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
1344 | 1332 |
1345 #if !OS(ANDROID) | 1333 #if !defined(OS_ANDROID) |
1346 | 1334 |
1347 // Make sure that malloc(-1) dies. | 1335 // Make sure that malloc(-1) dies. |
1348 // In the past, we had an integer overflow that would alias malloc(-1) to | 1336 // In the past, we had an integer overflow that would alias malloc(-1) to |
1349 // malloc(0), which is not good. | 1337 // malloc(0), which is not good. |
1350 TEST(PartitionAllocDeathTest, LargeAllocs) { | 1338 TEST(PartitionAllocDeathTest, LargeAllocs) { |
1351 TestSetup(); | 1339 TestSetup(); |
1352 // Largest alloc. | 1340 // Largest alloc. |
1353 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), | 1341 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), |
1354 static_cast<size_t>(-1), typeName), | 1342 static_cast<size_t>(-1), typeName), |
1355 ""); | 1343 ""); |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1445 EXPECT_TRUE(ptr); | 1433 EXPECT_TRUE(ptr); |
1446 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; | 1434 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; |
1447 | 1435 |
1448 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); | 1436 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); |
1449 | 1437 |
1450 partitionFreeGeneric(genericAllocator.root(), ptr); | 1438 partitionFreeGeneric(genericAllocator.root(), ptr); |
1451 | 1439 |
1452 TestShutdown(); | 1440 TestShutdown(); |
1453 } | 1441 } |
1454 | 1442 |
1455 #endif // !OS(ANDROID) | 1443 #endif // !defined(OS_ANDROID) |
1456 | 1444 |
1457 // Tests that partitionDumpStatsGeneric and partitionDumpStats runs without | 1445 // Tests that partitionDumpStatsGeneric and partitionDumpStats runs without |
1458 // crashing and returns non zero values when memory is allocated. | 1446 // crashing and returns non zero values when memory is allocated. |
1459 TEST(PartitionAllocTest, DumpMemoryStats) { | 1447 TEST(PartitionAllocTest, DumpMemoryStats) { |
1460 TestSetup(); | 1448 TestSetup(); |
1461 { | 1449 { |
1462 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 1450 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); |
1463 MockPartitionStatsDumper mockStatsDumper; | 1451 MockPartitionStatsDumper mockStatsDumper; |
1464 partitionDumpStats(allocator.root(), "mock_allocator", | 1452 partitionDumpStats(allocator.root(), "mock_allocator", |
1465 false /* detailed dump */, &mockStatsDumper); | 1453 false /* detailed dump */, &mockStatsDumper); |
(...skipping 640 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2106 | 2094 |
2107 EXPECT_FALSE(page->freelistHead); | 2095 EXPECT_FALSE(page->freelistHead); |
2108 | 2096 |
2109 partitionFreeGeneric(genericAllocator.root(), ptr1); | 2097 partitionFreeGeneric(genericAllocator.root(), ptr1); |
2110 partitionFreeGeneric(genericAllocator.root(), ptr2); | 2098 partitionFreeGeneric(genericAllocator.root(), ptr2); |
2111 } | 2099 } |
2112 | 2100 |
2113 TestShutdown(); | 2101 TestShutdown(); |
2114 } | 2102 } |
2115 | 2103 |
2116 } // namespace WTF | 2104 TEST(PartitionAllocTest, CLZWorks) { |
2105 EXPECT_EQ(32u, countLeadingZeros32(0u)); | |
2106 EXPECT_EQ(31u, countLeadingZeros32(1u)); | |
2107 EXPECT_EQ(1u, countLeadingZeros32(1u << 30)); | |
2108 EXPECT_EQ(0u, countLeadingZeros32(1u << 31)); | |
2109 | |
2110 #if defined(ARCH_CPU_64_BITS) | |
2111 EXPECT_EQ(64u, countLeadingZerosSizet(0ull)); | |
2112 EXPECT_EQ(63u, countLeadingZerosSizet(1ull)); | |
2113 EXPECT_EQ(32u, countLeadingZerosSizet(1ull << 31)); | |
2114 EXPECT_EQ(1u, countLeadingZerosSizet(1ull << 62)); | |
2115 EXPECT_EQ(0u, countLeadingZerosSizet(1ull << 63)); | |
2116 #else | |
2117 EXPECT_EQ(32u, countLeadingZerosSizet(0u)); | |
2118 EXPECT_EQ(31u, countLeadingZerosSizet(1u)); | |
2119 EXPECT_EQ(1u, countLeadingZerosSizet(1u << 30)); | |
2120 EXPECT_EQ(0u, countLeadingZerosSizet(1u << 31)); | |
2121 #endif | |
2122 } | |
2123 | |
2124 } // namespace base | |
2117 | 2125 |
2118 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 2126 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
OLD | NEW |