Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(248)

Side by Side Diff: Source/wtf/PartitionAllocTest.cpp

Issue 1184043002: Fix unit test style in Source/wtf/. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: apply review comments Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/wtf/MathExtrasTest.cpp ('k') | Source/wtf/RefPtrTest.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
45 #include <sys/resource.h> 45 #include <sys/resource.h>
46 #include <sys/time.h> 46 #include <sys/time.h>
47 47
48 #ifndef MAP_ANONYMOUS 48 #ifndef MAP_ANONYMOUS
49 #define MAP_ANONYMOUS MAP_ANON 49 #define MAP_ANONYMOUS MAP_ANON
50 #endif 50 #endif
51 #endif // OS(POSIX) 51 #endif // OS(POSIX)
52 52
53 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 53 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
54 54
55 namespace WTF {
56
55 namespace { 57 namespace {
56 58
57 static const size_t kTestMaxAllocation = 4096; 59 const size_t kTestMaxAllocation = 4096;
58 static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; 60 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
59 static PartitionAllocatorGeneric genericAllocator; 61 PartitionAllocatorGeneric genericAllocator;
60 62
61 static const size_t kTestAllocSize = 16; 63 const size_t kTestAllocSize = 16;
62 #if !ENABLE(ASSERT) 64 #if !ENABLE(ASSERT)
63 static const size_t kPointerOffset = 0; 65 const size_t kPointerOffset = 0;
64 static const size_t kExtraAllocSize = 0; 66 const size_t kExtraAllocSize = 0;
65 #else 67 #else
66 static const size_t kPointerOffset = WTF::kCookieSize; 68 const size_t kPointerOffset = WTF::kCookieSize;
67 static const size_t kExtraAllocSize = WTF::kCookieSize * 2; 69 const size_t kExtraAllocSize = WTF::kCookieSize * 2;
68 #endif 70 #endif
69 static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; 71 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
70 static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift; 72 const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
71 73
72 static void TestSetup() 74 void TestSetup()
73 { 75 {
74 allocator.init(); 76 allocator.init();
75 genericAllocator.init(); 77 genericAllocator.init();
76 } 78 }
77 79
78 static void TestShutdown() 80 void TestShutdown()
79 { 81 {
80 // We expect no leaks in the general case. We have a test for leak 82 // We expect no leaks in the general case. We have a test for leak
81 // detection. 83 // detection.
82 EXPECT_TRUE(allocator.shutdown()); 84 EXPECT_TRUE(allocator.shutdown());
83 EXPECT_TRUE(genericAllocator.shutdown()); 85 EXPECT_TRUE(genericAllocator.shutdown());
84 } 86 }
85 87
86 static bool SetAddressSpaceLimit() 88 bool SetAddressSpaceLimit()
87 { 89 {
88 #if !CPU(64BIT) 90 #if !CPU(64BIT)
89 // 32 bits => address space is limited already. 91 // 32 bits => address space is limited already.
90 return true; 92 return true;
91 #elif OS(POSIX) && !OS(MACOSX) 93 #elif OS(POSIX) && !OS(MACOSX)
92 // Mac will accept RLIMIT_AS changes but it is not enforced. 94 // Mac will accept RLIMIT_AS changes but it is not enforced.
93 // See https://crbug.com/435269 and rdar://17576114. 95 // See https://crbug.com/435269 and rdar://17576114.
94 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024; 96 const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024;
95 struct rlimit limit; 97 struct rlimit limit;
96 if (getrlimit(RLIMIT_AS, &limit) != 0) 98 if (getrlimit(RLIMIT_AS, &limit) != 0)
97 return false; 99 return false;
98 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { 100 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
99 limit.rlim_cur = kAddressSpaceLimit; 101 limit.rlim_cur = kAddressSpaceLimit;
100 if (setrlimit(RLIMIT_AS, &limit) != 0) 102 if (setrlimit(RLIMIT_AS, &limit) != 0)
101 return false; 103 return false;
102 } 104 }
103 return true; 105 return true;
104 #else 106 #else
105 return false; 107 return false;
106 #endif 108 #endif
107 } 109 }
108 110
109 static bool ClearAddressSpaceLimit() 111 bool ClearAddressSpaceLimit()
110 { 112 {
111 #if !CPU(64BIT) 113 #if !CPU(64BIT)
112 return true; 114 return true;
113 #elif OS(POSIX) 115 #elif OS(POSIX)
114 struct rlimit limit; 116 struct rlimit limit;
115 if (getrlimit(RLIMIT_AS, &limit) != 0) 117 if (getrlimit(RLIMIT_AS, &limit) != 0)
116 return false; 118 return false;
117 limit.rlim_cur = limit.rlim_max; 119 limit.rlim_cur = limit.rlim_max;
118 if (setrlimit(RLIMIT_AS, &limit) != 0) 120 if (setrlimit(RLIMIT_AS, &limit) != 0)
119 return false; 121 return false;
120 return true; 122 return true;
121 #else 123 #else
122 return false; 124 return false;
123 #endif 125 #endif
124 } 126 }
125 127
126 static WTF::PartitionPage* GetFullPage(size_t size) 128 PartitionPage* GetFullPage(size_t size)
127 { 129 {
128 size_t realSize = size + kExtraAllocSize; 130 size_t realSize = size + kExtraAllocSize;
129 size_t bucketIdx = realSize >> WTF::kBucketShift; 131 size_t bucketIdx = realSize >> kBucketShift;
130 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; 132 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
131 size_t realSize = size + kExtraAllocSize; 130 size_t realSize = size + kExtraAllocSize;
132 void* first = 0; 134 void* first = 0;
133 void* last = 0; 135 void* last = 0;
134 size_t i; 136 size_t i;
135 for (i = 0; i < numSlots; ++i) { 137 for (i = 0; i < numSlots; ++i) {
136 void* ptr = partitionAlloc(allocator.root(), size); 138 void* ptr = partitionAlloc(allocator.root(), size);
137 EXPECT_TRUE(ptr); 139 EXPECT_TRUE(ptr);
138 if (!i) 140 if (!i)
139 first = WTF::partitionCookieFreePointerAdjust(ptr); 141 first = partitionCookieFreePointerAdjust(ptr);
140 else if (i == numSlots - 1) 142 else if (i == numSlots - 1)
141 last = WTF::partitionCookieFreePointerAdjust(ptr); 143 last = partitionCookieFreePointerAdjust(ptr);
142 } 144 }
143 EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last)); 145 EXPECT_EQ(partitionPointerToPage(first), partitionPointerToPage(last));
144 if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage) 146 if (bucket->numSystemPagesPerSlotSpan == kNumSystemPagesPerPartitionPage)
145 EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask); 147 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
146 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots)); 148 EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
147 EXPECT_EQ(0, bucket->activePagesHead->freelistHead); 149 EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
148 EXPECT_TRUE(bucket->activePagesHead); 150 EXPECT_TRUE(bucket->activePagesHead);
149 EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage); 151 EXPECT_TRUE(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
150 return bucket->activePagesHead; 152 return bucket->activePagesHead;
151 } 153 }
152 154
153 static void FreeFullPage(WTF::PartitionPage* page) 155 void FreeFullPage(PartitionPage* page)
154 { 156 {
155 size_t size = page->bucket->slotSize; 157 size_t size = page->bucket->slotSize;
156 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size; 158 size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / size;
157 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots))); 159 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
158 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); 160 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
159 size_t i; 161 size_t i;
160 for (i = 0; i < numSlots; ++i) { 162 for (i = 0; i < numSlots; ++i) {
161 partitionFree(ptr + kPointerOffset); 163 partitionFree(ptr + kPointerOffset);
162 ptr += size; 164 ptr += size;
163 } 165 }
164 } 166 }
165 167
166 static void CycleFreeCache(size_t size) 168 void CycleFreeCache(size_t size)
167 { 169 {
168 size_t realSize = size + kExtraAllocSize; 170 size_t realSize = size + kExtraAllocSize;
169 size_t bucketIdx = realSize >> WTF::kBucketShift; 171 size_t bucketIdx = realSize >> kBucketShift;
170 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; 172 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
171 ASSERT(!bucket->activePagesHead->numAllocatedSlots); 173 ASSERT(!bucket->activePagesHead->numAllocatedSlots);
172 174
173 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) { 175 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
174 void* ptr = partitionAlloc(allocator.root(), size); 176 void* ptr = partitionAlloc(allocator.root(), size);
175 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); 177 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
176 partitionFree(ptr); 178 partitionFree(ptr);
177 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); 179 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
178 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); 180 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
179 } 181 }
180 } 182 }
181 183
182 static void CycleGenericFreeCache(size_t size) 184 void CycleGenericFreeCache(size_t size)
183 { 185 {
184 for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) { 186 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
185 void* ptr = partitionAllocGeneric(genericAllocator.root(), size); 187 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
186 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr)); 188 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
187 WTF::PartitionBucket* bucket = page->bucket; 189 PartitionBucket* bucket = page->bucket;
188 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); 190 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
189 partitionFreeGeneric(genericAllocator.root(), ptr); 191 partitionFreeGeneric(genericAllocator.root(), ptr);
190 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); 192 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
191 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); 193 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
192 } 194 }
193 } 195 }
194 196
195 class MockPartitionStatsDumper : public WTF::PartitionStatsDumper { 197 class MockPartitionStatsDumper : public PartitionStatsDumper {
196 public: 198 public:
197 MockPartitionStatsDumper() 199 MockPartitionStatsDumper()
198 : m_totalResidentBytes(0) 200 : m_totalResidentBytes(0)
199 , m_totalActiveBytes(0) { } 201 , m_totalActiveBytes(0) { }
200 202
201 virtual void partitionsDumpBucketStats(const char* partitionName, const WTF::PartitionBucketMemoryStats* memoryStats) override 203 void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats* memoryStats) override
202 { 204 {
203 (void) partitionName; 205 (void) partitionName;
204 EXPECT_TRUE(memoryStats->isValid); 206 EXPECT_TRUE(memoryStats->isValid);
205 EXPECT_EQ(0u, memoryStats->bucketSlotSize & WTF::kAllocationGranularityMask); 207 EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask);
206 m_bucketStats.append(*memoryStats); 208 m_bucketStats.append(*memoryStats);
207 m_totalResidentBytes += memoryStats->residentBytes; 209 m_totalResidentBytes += memoryStats->residentBytes;
208 m_totalActiveBytes += memoryStats->activeBytes; 210 m_totalActiveBytes += memoryStats->activeBytes;
209 } 211 }
210 212
211 bool IsMemoryAllocationRecorded() 213 bool IsMemoryAllocationRecorded()
212 { 214 {
213 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0; 215 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0;
214 } 216 }
215 217
216 const WTF::PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize) 218 const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize)
217 { 219 {
218 for (size_t i = 0; i < m_bucketStats.size(); ++i) { 220 for (size_t i = 0; i < m_bucketStats.size(); ++i) {
219 if (m_bucketStats[i].bucketSlotSize == bucketSize) 221 if (m_bucketStats[i].bucketSlotSize == bucketSize)
220 return &m_bucketStats[i]; 222 return &m_bucketStats[i];
221 } 223 }
222 return 0; 224 return 0;
223 } 225 }
224 226
225 private: 227 private:
226 size_t m_totalResidentBytes; 228 size_t m_totalResidentBytes;
227 size_t m_totalActiveBytes; 229 size_t m_totalActiveBytes;
228 230
229 Vector<WTF::PartitionBucketMemoryStats> m_bucketStats; 231 Vector<PartitionBucketMemoryStats> m_bucketStats;
230 }; 232 };
231 233
234 } // anonymous namespace
235
232 // Check that the most basic of allocate / free pairs work. 236 // Check that the most basic of allocate / free pairs work.
233 TEST(PartitionAllocTest, Basic) 237 TEST(PartitionAllocTest, Basic)
234 { 238 {
235 TestSetup(); 239 TestSetup();
236 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 240 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
237 WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage; 241 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage;
238 242
239 EXPECT_FALSE(bucket->emptyPagesHead); 243 EXPECT_FALSE(bucket->emptyPagesHead);
240 EXPECT_EQ(seedPage, bucket->activePagesHead); 244 EXPECT_EQ(seedPage, bucket->activePagesHead);
241 EXPECT_EQ(0, bucket->activePagesHead->nextPage); 245 EXPECT_EQ(0, bucket->activePagesHead->nextPage);
242 246
243 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize); 247 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
244 EXPECT_TRUE(ptr); 248 EXPECT_TRUE(ptr);
245 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask); 249 EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
246 // Check that the offset appears to include a guard page. 250 // Check that the offset appears to include a guard page.
247 EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask); 251 EXPECT_EQ(kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);
248 252
249 partitionFree(ptr); 253 partitionFree(ptr);
250 // Expect that the last active page does not get tossed to the freelist. 254 // Expect that the last active page does not get tossed to the freelist.
251 EXPECT_FALSE(bucket->emptyPagesHead); 255 EXPECT_FALSE(bucket->emptyPagesHead);
252 256
253 TestShutdown(); 257 TestShutdown();
254 } 258 }
255 259
256 // Check that we can detect a memory leak. 260 // Check that we can detect a memory leak.
257 TEST(PartitionAllocTest, SimpleLeak) 261 TEST(PartitionAllocTest, SimpleLeak)
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
298 partitionFree(ptr2); 302 partitionFree(ptr2);
299 partitionFree(ptr3); 303 partitionFree(ptr3);
300 304
301 TestShutdown(); 305 TestShutdown();
302 } 306 }
303 307
304 // Test a bucket with multiple pages. 308 // Test a bucket with multiple pages.
305 TEST(PartitionAllocTest, MultiPages) 309 TEST(PartitionAllocTest, MultiPages)
306 { 310 {
307 TestSetup(); 311 TestSetup();
308 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 312 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
309 313
310 WTF::PartitionPage* page = GetFullPage(kTestAllocSize); 314 PartitionPage* page = GetFullPage(kTestAllocSize);
311 FreeFullPage(page); 315 FreeFullPage(page);
312 EXPECT_FALSE(bucket->emptyPagesHead); 316 EXPECT_FALSE(bucket->emptyPagesHead);
313 EXPECT_EQ(page, bucket->activePagesHead); 317 EXPECT_EQ(page, bucket->activePagesHead);
314 EXPECT_EQ(0, page->nextPage); 318 EXPECT_EQ(0, page->nextPage);
315 EXPECT_EQ(0, page->numAllocatedSlots); 319 EXPECT_EQ(0, page->numAllocatedSlots);
316 320
317 page = GetFullPage(kTestAllocSize); 321 page = GetFullPage(kTestAllocSize);
318 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize); 322 PartitionPage* page2 = GetFullPage(kTestAllocSize);
319 323
320 EXPECT_EQ(page2, bucket->activePagesHead); 324 EXPECT_EQ(page2, bucket->activePagesHead);
321 EXPECT_EQ(0, page2->nextPage); 325 EXPECT_EQ(0, page2->nextPage);
322 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask); 326 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & kSuperPageBaseMask);
323 327
324 // Fully free the non-current page. It should not be freelisted because 328 // Fully free the non-current page. It should not be freelisted because
325 // there is no other immediately usable page. The other page is full. 329 // there is no other immediately usable page. The other page is full.
326 FreeFullPage(page); 330 FreeFullPage(page);
327 EXPECT_EQ(0, page->numAllocatedSlots); 331 EXPECT_EQ(0, page->numAllocatedSlots);
328 EXPECT_FALSE(bucket->emptyPagesHead); 332 EXPECT_FALSE(bucket->emptyPagesHead);
329 EXPECT_EQ(page, bucket->activePagesHead); 333 EXPECT_EQ(page, bucket->activePagesHead);
330 334
331 // Allocate a new page, it should pull from the freelist. 335 // Allocate a new page, it should pull from the freelist.
332 page = GetFullPage(kTestAllocSize); 336 page = GetFullPage(kTestAllocSize);
333 EXPECT_FALSE(bucket->emptyPagesHead); 337 EXPECT_FALSE(bucket->emptyPagesHead);
334 EXPECT_EQ(page, bucket->activePagesHead); 338 EXPECT_EQ(page, bucket->activePagesHead);
335 339
336 FreeFullPage(page); 340 FreeFullPage(page);
337 FreeFullPage(page2); 341 FreeFullPage(page2);
338 EXPECT_EQ(0, page->numAllocatedSlots); 342 EXPECT_EQ(0, page->numAllocatedSlots);
339 EXPECT_EQ(0, page2->numAllocatedSlots); 343 EXPECT_EQ(0, page2->numAllocatedSlots);
340 EXPECT_EQ(0, page2->numUnprovisionedSlots); 344 EXPECT_EQ(0, page2->numUnprovisionedSlots);
341 EXPECT_NE(-1, page2->emptyCacheIndex); 345 EXPECT_NE(-1, page2->emptyCacheIndex);
342 346
343 TestShutdown(); 347 TestShutdown();
344 } 348 }
345 349
346 // Test some finer aspects of internal page transitions. 350 // Test some finer aspects of internal page transitions.
347 TEST(PartitionAllocTest, PageTransitions) 351 TEST(PartitionAllocTest, PageTransitions)
348 { 352 {
349 TestSetup(); 353 TestSetup();
350 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 354 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
351 355
352 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize); 356 PartitionPage* page1 = GetFullPage(kTestAllocSize);
353 EXPECT_EQ(page1, bucket->activePagesHead); 357 EXPECT_EQ(page1, bucket->activePagesHead);
354 EXPECT_EQ(0, page1->nextPage); 358 EXPECT_EQ(0, page1->nextPage);
355 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize); 359 PartitionPage* page2 = GetFullPage(kTestAllocSize);
356 EXPECT_EQ(page2, bucket->activePagesHead); 360 EXPECT_EQ(page2, bucket->activePagesHead);
357 EXPECT_EQ(0, page2->nextPage); 361 EXPECT_EQ(0, page2->nextPage);
358 362
359 // Bounce page1 back into the non-full list then fill it up again. 363 // Bounce page1 back into the non-full list then fill it up again.
360 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; 364 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
361 partitionFree(ptr); 365 partitionFree(ptr);
362 EXPECT_EQ(page1, bucket->activePagesHead); 366 EXPECT_EQ(page1, bucket->activePagesHead);
363 (void) partitionAlloc(allocator.root(), kTestAllocSize); 367 (void) partitionAlloc(allocator.root(), kTestAllocSize);
364 EXPECT_EQ(page1, bucket->activePagesHead); 368 EXPECT_EQ(page1, bucket->activePagesHead);
365 EXPECT_EQ(page2, bucket->activePagesHead->nextPage); 369 EXPECT_EQ(page2, bucket->activePagesHead->nextPage);
366 370
367 // Allocating another page at this point should cause us to scan over page1 371 // Allocating another page at this point should cause us to scan over page1
368 // (which is both full and NOT our current page), and evict it from the 372 // (which is both full and NOT our current page), and evict it from the
369 // freelist. Older code had a O(n^2) condition due to failure to do this. 373 // freelist. Older code had a O(n^2) condition due to failure to do this.
370 WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize); 374 PartitionPage* page3 = GetFullPage(kTestAllocSize);
371 EXPECT_EQ(page3, bucket->activePagesHead); 375 EXPECT_EQ(page3, bucket->activePagesHead);
372 EXPECT_EQ(0, page3->nextPage); 376 EXPECT_EQ(0, page3->nextPage);
373 377
374 // Work out a pointer into page2 and free it. 378 // Work out a pointer into page2 and free it.
375 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset; 379 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
376 partitionFree(ptr); 380 partitionFree(ptr);
377 // Trying to allocate at this time should cause us to cycle around to page2 381 // Trying to allocate at this time should cause us to cycle around to page2
378 // and find the recently freed slot. 382 // and find the recently freed slot.
379 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize)); 383 char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
380 EXPECT_EQ(ptr, newPtr); 384 EXPECT_EQ(ptr, newPtr);
(...skipping 19 matching lines...) Expand all
400 partitionFree(ptr); 404 partitionFree(ptr);
401 405
402 TestShutdown(); 406 TestShutdown();
403 } 407 }
404 408
405 // Test some corner cases relating to page transitions in the internal 409 // Test some corner cases relating to page transitions in the internal
406 // free page list metadata bucket. 410 // free page list metadata bucket.
407 TEST(PartitionAllocTest, FreePageListPageTransitions) 411 TEST(PartitionAllocTest, FreePageListPageTransitions)
408 { 412 {
409 TestSetup(); 413 TestSetup();
410 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; 414 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
411 415
412 size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize); 416 size_t numToFillFreeListPage = kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
413 // The +1 is because we need to account for the fact that the current page 417 // The +1 is because we need to account for the fact that the current page
414 // never gets thrown on the freelist. 418 // never gets thrown on the freelist.
415 ++numToFillFreeListPage; 419 ++numToFillFreeListPage;
416 OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]); 420 OwnPtr<PartitionPage*[]> pages = adoptArrayPtr(new PartitionPage*[numToFillFreeListPage]);
417 421
418 size_t i; 422 size_t i;
419 for (i = 0; i < numToFillFreeListPage; ++i) { 423 for (i = 0; i < numToFillFreeListPage; ++i) {
420 pages[i] = GetFullPage(kTestAllocSize); 424 pages[i] = GetFullPage(kTestAllocSize);
421 } 425 }
422 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); 426 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
423 for (i = 0; i < numToFillFreeListPage; ++i) 427 for (i = 0; i < numToFillFreeListPage; ++i)
424 FreeFullPage(pages[i]); 428 FreeFullPage(pages[i]);
425 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); 429 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
426 EXPECT_NE(-1, bucket->activePagesHead->nextPage->emptyCacheIndex); 430 EXPECT_NE(-1, bucket->activePagesHead->nextPage->emptyCacheIndex);
427 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots); 431 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
428 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots); 432 EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);
429 433
430 // Allocate / free in a different bucket size so we get control of a 434 // Allocate / free in a different bucket size so we get control of a
431 // different free page list. We need two pages because one will be the last 435 // different free page list. We need two pages because one will be the last
432 // active page and not get freed. 436 // active page and not get freed.
433 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2); 437 PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
434 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2); 438 PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
435 FreeFullPage(page1); 439 FreeFullPage(page1);
436 FreeFullPage(page2); 440 FreeFullPage(page2);
437 441
438 // If we re-allocate all kTestAllocSize allocations, we'll pull all the 442 // If we re-allocate all kTestAllocSize allocations, we'll pull all the
439 // free pages and end up freeing the first page for free page objects. 443 // free pages and end up freeing the first page for free page objects.
440 // It's getting a bit tricky but a nice re-entrancy is going on: 444 // It's getting a bit tricky but a nice re-entrancy is going on:
441 // alloc(kTestAllocSize) -> pulls page from free page list -> 445 // alloc(kTestAllocSize) -> pulls page from free page list ->
442 // free(PartitionFreepagelistEntry) -> last entry in page freed -> 446 // free(PartitionFreepagelistEntry) -> last entry in page freed ->
443 // alloc(PartitionFreepagelistEntry). 447 // alloc(PartitionFreepagelistEntry).
444 for (i = 0; i < numToFillFreeListPage; ++i) { 448 for (i = 0; i < numToFillFreeListPage; ++i) {
(...skipping 15 matching lines...) Expand all
460 TestShutdown(); 464 TestShutdown();
461 } 465 }
462 466
463 // Test a large series of allocations that cross more than one underlying 467 // Test a large series of allocations that cross more than one underlying
464 // 64KB super page allocation. 468 // 64KB super page allocation.
465 TEST(PartitionAllocTest, MultiPageAllocs) 469 TEST(PartitionAllocTest, MultiPageAllocs)
466 { 470 {
467 TestSetup(); 471 TestSetup();
468 // This is guaranteed to cross a super page boundary because the first 472 // This is guaranteed to cross a super page boundary because the first
469 // partition page "slot" will be taken up by a guard page. 473 // partition page "slot" will be taken up by a guard page.
470 size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage; 474 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
471 // The super page should begin and end in a guard so we one less page in 475 // The super page should begin and end in a guard so we one less page in
472 // order to allocate a single page in the new super page. 476 // order to allocate a single page in the new super page.
473 --numPagesNeeded; 477 --numPagesNeeded;
474 478
475 EXPECT_GT(numPagesNeeded, 1u); 479 EXPECT_GT(numPagesNeeded, 1u);
476 OwnPtr<WTF::PartitionPage*[]> pages; 480 OwnPtr<PartitionPage*[]> pages;
477 pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]); 481 pages = adoptArrayPtr(new PartitionPage*[numPagesNeeded]);
478 uintptr_t firstSuperPageBase = 0; 482 uintptr_t firstSuperPageBase = 0;
479 size_t i; 483 size_t i;
480 for (i = 0; i < numPagesNeeded; ++i) { 484 for (i = 0; i < numPagesNeeded; ++i) {
481 pages[i] = GetFullPage(kTestAllocSize); 485 pages[i] = GetFullPage(kTestAllocSize);
482 void* storagePtr = partitionPageToPointer(pages[i]); 486 void* storagePtr = partitionPageToPointer(pages[i]);
483 if (!i) 487 if (!i)
484 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask; 488 firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
485 if (i == numPagesNeeded - 1) { 489 if (i == numPagesNeeded - 1) {
486 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask; 490 uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
487 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask; 491 uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask;
488 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase); 492 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
489 // Check that we allocated a guard page for the second page. 493 // Check that we allocated a guard page for the second page.
490 EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset); 494 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset);
491 } 495 }
492 } 496 }
493 for (i = 0; i < numPagesNeeded; ++i) 497 for (i = 0; i < numPagesNeeded; ++i)
494 FreeFullPage(pages[i]); 498 FreeFullPage(pages[i]);
495 499
496 TestShutdown(); 500 TestShutdown();
497 } 501 }
498 502
499 // Test the generic allocation functions that can handle arbitrary sizes and 503 // Test the generic allocation functions that can handle arbitrary sizes and
500 // reallocing etc. 504 // reallocing etc.
501 TEST(PartitionAllocTest, GenericAlloc) 505 TEST(PartitionAllocTest, GenericAlloc)
502 { 506 {
503 TestSetup(); 507 TestSetup();
504 508
505 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1); 509 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
506 EXPECT_TRUE(ptr); 510 EXPECT_TRUE(ptr);
507 partitionFreeGeneric(genericAllocator.root(), ptr); 511 partitionFreeGeneric(genericAllocator.root(), ptr);
508 ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1); 512 ptr = partitionAllocGeneric(genericAllocator.root(), kGenericMaxBucketed + 1);
509 EXPECT_TRUE(ptr); 513 EXPECT_TRUE(ptr);
510 partitionFreeGeneric(genericAllocator.root(), ptr); 514 partitionFreeGeneric(genericAllocator.root(), ptr);
511 515
512 ptr = partitionAllocGeneric(genericAllocator.root(), 1); 516 ptr = partitionAllocGeneric(genericAllocator.root(), 1);
513 EXPECT_TRUE(ptr); 517 EXPECT_TRUE(ptr);
514 void* origPtr = ptr; 518 void* origPtr = ptr;
515 char* charPtr = static_cast<char*>(ptr); 519 char* charPtr = static_cast<char*>(ptr);
516 *charPtr = 'A'; 520 *charPtr = 'A';
517 521
518 // Change the size of the realloc, remaining inside the same bucket. 522 // Change the size of the realloc, remaining inside the same bucket.
519 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2); 523 void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
520 EXPECT_EQ(ptr, newPtr); 524 EXPECT_EQ(ptr, newPtr);
521 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1); 525 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
522 EXPECT_EQ(ptr, newPtr); 526 EXPECT_EQ(ptr, newPtr);
523 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGeneric SmallestBucket); 527 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmall estBucket);
524 EXPECT_EQ(ptr, newPtr); 528 EXPECT_EQ(ptr, newPtr);
525 529
526 // Change the size of the realloc, switching buckets. 530 // Change the size of the realloc, switching buckets.
527 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGeneric SmallestBucket + 1); 531 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmall estBucket + 1);
528 EXPECT_NE(newPtr, ptr); 532 EXPECT_NE(newPtr, ptr);
529 // Check that the realloc copied correctly. 533 // Check that the realloc copied correctly.
530 char* newCharPtr = static_cast<char*>(newPtr); 534 char* newCharPtr = static_cast<char*>(newPtr);
531 EXPECT_EQ(*newCharPtr, 'A'); 535 EXPECT_EQ(*newCharPtr, 'A');
532 #if ENABLE(ASSERT) 536 #if ENABLE(ASSERT)
533 // Subtle: this checks for an old bug where we copied too much from the 537 // Subtle: this checks for an old bug where we copied too much from the
534 // source of the realloc. The condition can be detected by a trashing of 538 // source of the realloc. The condition can be detected by a trashing of
535 // the uninitialized value in the space of the upsized allocation. 539 // the uninitialized value in the space of the upsized allocation.
536 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket))); 540 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + kGen ericSmallestBucket)));
537 #endif 541 #endif
538 *newCharPtr = 'B'; 542 *newCharPtr = 'B';
539 // The realloc moved. To check that the old allocation was freed, we can 543 // The realloc moved. To check that the old allocation was freed, we can
540 // do an alloc of the old allocation size and check that the old allocation 544 // do an alloc of the old allocation size and check that the old allocation
541 // address is at the head of the freelist and reused. 545 // address is at the head of the freelist and reused.
542 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1); 546 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
543 EXPECT_EQ(reusedPtr, origPtr); 547 EXPECT_EQ(reusedPtr, origPtr);
544 partitionFreeGeneric(genericAllocator.root(), reusedPtr); 548 partitionFreeGeneric(genericAllocator.root(), reusedPtr);
545 549
546 // Downsize the realloc. 550 // Downsize the realloc.
547 ptr = newPtr; 551 ptr = newPtr;
548 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1); 552 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
549 EXPECT_EQ(newPtr, origPtr); 553 EXPECT_EQ(newPtr, origPtr);
550 newCharPtr = static_cast<char*>(newPtr); 554 newCharPtr = static_cast<char*>(newPtr);
551 EXPECT_EQ(*newCharPtr, 'B'); 555 EXPECT_EQ(*newCharPtr, 'B');
552 *newCharPtr = 'C'; 556 *newCharPtr = 'C';
553 557
554 // Upsize the realloc to outside the partition. 558 // Upsize the realloc to outside the partition.
555 ptr = newPtr; 559 ptr = newPtr;
556 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGeneric MaxBucketed + 1); 560 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBu cketed + 1);
557 EXPECT_NE(newPtr, ptr); 561 EXPECT_NE(newPtr, ptr);
558 newCharPtr = static_cast<char*>(newPtr); 562 newCharPtr = static_cast<char*>(newPtr);
559 EXPECT_EQ(*newCharPtr, 'C'); 563 EXPECT_EQ(*newCharPtr, 'C');
560 *newCharPtr = 'D'; 564 *newCharPtr = 'D';
561 565
562 // Upsize and downsize the realloc, remaining outside the partition. 566 // Upsize and downsize the realloc, remaining outside the partition.
563 ptr = newPtr; 567 ptr = newPtr;
564 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGeneric MaxBucketed * 10); 568 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBu cketed * 10);
565 newCharPtr = static_cast<char*>(newPtr); 569 newCharPtr = static_cast<char*>(newPtr);
566 EXPECT_EQ(*newCharPtr, 'D'); 570 EXPECT_EQ(*newCharPtr, 'D');
567 *newCharPtr = 'E'; 571 *newCharPtr = 'E';
568 ptr = newPtr; 572 ptr = newPtr;
569 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGeneric MaxBucketed * 2); 573 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBu cketed * 2);
570 newCharPtr = static_cast<char*>(newPtr); 574 newCharPtr = static_cast<char*>(newPtr);
571 EXPECT_EQ(*newCharPtr, 'E'); 575 EXPECT_EQ(*newCharPtr, 'E');
572 *newCharPtr = 'F'; 576 *newCharPtr = 'F';
573 577
574 // Downsize the realloc to inside the partition. 578 // Downsize the realloc to inside the partition.
575 ptr = newPtr; 579 ptr = newPtr;
576 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1); 580 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
577 EXPECT_NE(newPtr, ptr); 581 EXPECT_NE(newPtr, ptr);
578 EXPECT_EQ(newPtr, origPtr); 582 EXPECT_EQ(newPtr, origPtr);
579 newCharPtr = static_cast<char*>(newPtr); 583 newCharPtr = static_cast<char*>(newPtr);
580 EXPECT_EQ(*newCharPtr, 'F'); 584 EXPECT_EQ(*newCharPtr, 'F');
581 585
582 partitionFreeGeneric(genericAllocator.root(), newPtr); 586 partitionFreeGeneric(genericAllocator.root(), newPtr);
583 TestShutdown(); 587 TestShutdown();
584 } 588 }
585 589
586 // Test the generic allocation functions can handle some specific sizes of 590 // Test the generic allocation functions can handle some specific sizes of
587 // interest. 591 // interest.
588 TEST(PartitionAllocTest, GenericAllocSizes) 592 TEST(PartitionAllocTest, GenericAllocSizes)
589 { 593 {
590 TestSetup(); 594 TestSetup();
591 595
592 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0); 596 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
593 EXPECT_TRUE(ptr); 597 EXPECT_TRUE(ptr);
594 partitionFreeGeneric(genericAllocator.root(), ptr); 598 partitionFreeGeneric(genericAllocator.root(), ptr);
595 599
596 // kPartitionPageSize is interesting because it results in just one 600 // kPartitionPageSize is interesting because it results in just one
597 // allocation per page, which tripped up some corner cases. 601 // allocation per page, which tripped up some corner cases.
598 size_t size = WTF::kPartitionPageSize - kExtraAllocSize; 602 size_t size = kPartitionPageSize - kExtraAllocSize;
599 ptr = partitionAllocGeneric(genericAllocator.root(), size); 603 ptr = partitionAllocGeneric(genericAllocator.root(), size);
600 EXPECT_TRUE(ptr); 604 EXPECT_TRUE(ptr);
601 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size); 605 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
602 EXPECT_TRUE(ptr2); 606 EXPECT_TRUE(ptr2);
603 partitionFreeGeneric(genericAllocator.root(), ptr); 607 partitionFreeGeneric(genericAllocator.root(), ptr);
604 // Should be freeable at this point. 608 // Should be freeable at this point.
605 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 609 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
606 EXPECT_NE(-1, page->emptyCacheIndex); 610 EXPECT_NE(-1, page->emptyCacheIndex);
607 partitionFreeGeneric(genericAllocator.root(), ptr2); 611 partitionFreeGeneric(genericAllocator.root(), ptr2);
608 612
609 size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WT F::kSystemPageSize) / 2) - kExtraAllocSize; 613 size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - kSystemPageS ize) / 2) - kExtraAllocSize;
610 ptr = partitionAllocGeneric(genericAllocator.root(), size); 614 ptr = partitionAllocGeneric(genericAllocator.root(), size);
611 EXPECT_TRUE(ptr); 615 EXPECT_TRUE(ptr);
612 memset(ptr, 'A', size); 616 memset(ptr, 'A', size);
613 ptr2 = partitionAllocGeneric(genericAllocator.root(), size); 617 ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
614 EXPECT_TRUE(ptr2); 618 EXPECT_TRUE(ptr2);
615 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size); 619 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
616 EXPECT_TRUE(ptr3); 620 EXPECT_TRUE(ptr3);
617 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size); 621 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
618 EXPECT_TRUE(ptr4); 622 EXPECT_TRUE(ptr4);
619 623
620 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 624 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
621 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookie FreePointerAdjust(ptr3)); 625 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdju st(ptr3));
622 EXPECT_NE(page, page2); 626 EXPECT_NE(page, page2);
623 627
624 partitionFreeGeneric(genericAllocator.root(), ptr); 628 partitionFreeGeneric(genericAllocator.root(), ptr);
625 partitionFreeGeneric(genericAllocator.root(), ptr3); 629 partitionFreeGeneric(genericAllocator.root(), ptr3);
626 partitionFreeGeneric(genericAllocator.root(), ptr2); 630 partitionFreeGeneric(genericAllocator.root(), ptr2);
627 // Should be freeable at this point. 631 // Should be freeable at this point.
628 EXPECT_NE(-1, page->emptyCacheIndex); 632 EXPECT_NE(-1, page->emptyCacheIndex);
629 EXPECT_EQ(0, page->numAllocatedSlots); 633 EXPECT_EQ(0, page->numAllocatedSlots);
630 EXPECT_EQ(0, page->numUnprovisionedSlots); 634 EXPECT_EQ(0, page->numUnprovisionedSlots);
631 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size); 635 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
(...skipping 14 matching lines...) Expand all
646 partitionFreeGeneric(genericAllocator.root(), ptr3); 650 partitionFreeGeneric(genericAllocator.root(), ptr3);
647 partitionFreeGeneric(genericAllocator.root(), ptr4); 651 partitionFreeGeneric(genericAllocator.root(), ptr4);
648 652
649 // Can we allocate a massive (512MB) size? 653 // Can we allocate a massive (512MB) size?
650 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024); 654 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
651 partitionFreeGeneric(genericAllocator.root(), ptr); 655 partitionFreeGeneric(genericAllocator.root(), ptr);
652 656
653 // Check a more reasonable, but still direct mapped, size. 657 // Check a more reasonable, but still direct mapped, size.
654 // Chop a system page and a byte off to test for rounding errors. 658 // Chop a system page and a byte off to test for rounding errors.
655 size = 20 * 1024 * 1024; 659 size = 20 * 1024 * 1024;
656 size -= WTF::kSystemPageSize; 660 size -= kSystemPageSize;
657 size -= 1; 661 size -= 1;
658 ptr = partitionAllocGeneric(genericAllocator.root(), size); 662 ptr = partitionAllocGeneric(genericAllocator.root(), size);
659 char* charPtr = reinterpret_cast<char*>(ptr); 663 char* charPtr = reinterpret_cast<char*>(ptr);
660 *(charPtr + (size - 1)) = 'A'; 664 *(charPtr + (size - 1)) = 'A';
661 partitionFreeGeneric(genericAllocator.root(), ptr); 665 partitionFreeGeneric(genericAllocator.root(), ptr);
662 666
663 // Can we free null? 667 // Can we free null?
664 partitionFreeGeneric(genericAllocator.root(), 0); 668 partitionFreeGeneric(genericAllocator.root(), 0);
665 669
666 // Do we correctly get a null for a failed allocation? 670 // Do we correctly get a null for a failed allocation?
667 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::Partit ionAllocReturnNull, 3u * 1024 * 1024 * 1024)); 671 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), PartitionAl locReturnNull, 3u * 1024 * 1024 * 1024));
668 672
669 TestShutdown(); 673 TestShutdown();
670 } 674 }
671 675
672 // Test that we can fetch the real allocated size after an allocation. 676 // Test that we can fetch the real allocated size after an allocation.
673 TEST(PartitionAllocTest, GenericAllocGetSize) 677 TEST(PartitionAllocTest, GenericAllocGetSize)
674 { 678 {
675 TestSetup(); 679 TestSetup();
676 680
677 void* ptr; 681 void* ptr;
(...skipping 17 matching lines...) Expand all
695 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize); 699 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize);
696 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize); 700 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
697 EXPECT_TRUE(ptr); 701 EXPECT_TRUE(ptr);
698 actualSize = partitionAllocGetSize(ptr); 702 actualSize = partitionAllocGetSize(ptr);
699 EXPECT_EQ(predictedSize, actualSize); 703 EXPECT_EQ(predictedSize, actualSize);
700 EXPECT_EQ(requestedSize, actualSize); 704 EXPECT_EQ(requestedSize, actualSize);
701 partitionFreeGeneric(genericAllocator.root(), ptr); 705 partitionFreeGeneric(genericAllocator.root(), ptr);
702 706
703 // Allocate a size that is a system page smaller than a bucket. GetSize() 707 // Allocate a size that is a system page smaller than a bucket. GetSize()
704 // should return a larger size than we asked for now. 708 // should return a larger size than we asked for now.
705 requestedSize = (256 * 1024) - WTF::kSystemPageSize - kExtraAllocSize; 709 requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
706 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize); 710 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize);
707 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize); 711 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
708 EXPECT_TRUE(ptr); 712 EXPECT_TRUE(ptr);
709 actualSize = partitionAllocGetSize(ptr); 713 actualSize = partitionAllocGetSize(ptr);
710 EXPECT_EQ(predictedSize, actualSize); 714 EXPECT_EQ(predictedSize, actualSize);
711 EXPECT_EQ(requestedSize + WTF::kSystemPageSize, actualSize); 715 EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
712 // Check that we can write at the end of the reported size too. 716 // Check that we can write at the end of the reported size too.
713 char* charPtr = reinterpret_cast<char*>(ptr); 717 char* charPtr = reinterpret_cast<char*>(ptr);
714 *(charPtr + (actualSize - 1)) = 'A'; 718 *(charPtr + (actualSize - 1)) = 'A';
715 partitionFreeGeneric(genericAllocator.root(), ptr); 719 partitionFreeGeneric(genericAllocator.root(), ptr);
716 720
717 // Allocate something very large, and uneven. 721 // Allocate something very large, and uneven.
718 requestedSize = 512 * 1024 * 1024 - 1; 722 requestedSize = 512 * 1024 * 1024 - 1;
719 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize); 723 predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedS ize);
720 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize); 724 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
721 EXPECT_TRUE(ptr); 725 EXPECT_TRUE(ptr);
(...skipping 11 matching lines...) Expand all
733 } 737 }
734 738
735 // Test the realloc() contract. 739 // Test the realloc() contract.
736 TEST(PartitionAllocTest, Realloc) 740 TEST(PartitionAllocTest, Realloc)
737 { 741 {
738 TestSetup(); 742 TestSetup();
739 743
740 // realloc(0, size) should be equivalent to malloc(). 744 // realloc(0, size) should be equivalent to malloc().
741 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSi ze); 745 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSi ze);
742 memset(ptr, 'A', kTestAllocSize); 746 memset(ptr, 'A', kTestAllocSize);
743 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 747 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
744 // realloc(ptr, 0) should be equivalent to free(). 748 // realloc(ptr, 0) should be equivalent to free().
745 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0); 749 void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
746 EXPECT_EQ(0, ptr2); 750 EXPECT_EQ(0, ptr2);
747 EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead); 751 EXPECT_EQ(partitionCookieFreePointerAdjust(ptr), page->freelistHead);
748 752
749 // Test that growing an allocation with realloc() copies everything from the 753 // Test that growing an allocation with realloc() copies everything from the
750 // old allocation. 754 // old allocation.
751 size_t size = WTF::kSystemPageSize - kExtraAllocSize; 755 size_t size = kSystemPageSize - kExtraAllocSize;
752 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); 756 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
753 ptr = partitionAllocGeneric(genericAllocator.root(), size); 757 ptr = partitionAllocGeneric(genericAllocator.root(), size);
754 memset(ptr, 'A', size); 758 memset(ptr, 'A', size);
755 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1); 759 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
756 EXPECT_NE(ptr, ptr2); 760 EXPECT_NE(ptr, ptr2);
757 char* charPtr2 = static_cast<char*>(ptr2); 761 char* charPtr2 = static_cast<char*>(ptr2);
758 EXPECT_EQ('A', charPtr2[0]); 762 EXPECT_EQ('A', charPtr2[0]);
759 EXPECT_EQ('A', charPtr2[size - 1]); 763 EXPECT_EQ('A', charPtr2[size - 1]);
760 #if ENABLE(ASSERT) 764 #if ENABLE(ASSERT)
761 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr2[size] )); 765 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
762 #endif 766 #endif
763 767
764 // Test that shrinking an allocation with realloc() also copies everything 768 // Test that shrinking an allocation with realloc() also copies everything
765 // from the old allocation. 769 // from the old allocation.
766 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1); 770 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
767 EXPECT_NE(ptr2, ptr); 771 EXPECT_NE(ptr2, ptr);
768 char* charPtr = static_cast<char*>(ptr); 772 char* charPtr = static_cast<char*>(ptr);
769 EXPECT_EQ('A', charPtr[0]); 773 EXPECT_EQ('A', charPtr[0]);
770 EXPECT_EQ('A', charPtr[size - 2]); 774 EXPECT_EQ('A', charPtr[size - 2]);
771 #if ENABLE(ASSERT) 775 #if ENABLE(ASSERT)
772 EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); 776 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])) ;
773 #endif 777 #endif
774 778
775 partitionFreeGeneric(genericAllocator.root(), ptr); 779 partitionFreeGeneric(genericAllocator.root(), ptr);
776 780
777 // Test that shrinking a direct mapped allocation happens in-place. 781 // Test that shrinking a direct mapped allocation happens in-place.
778 size = WTF::kGenericMaxBucketed + 16 * WTF::kSystemPageSize; 782 size = kGenericMaxBucketed + 16 * kSystemPageSize;
779 ptr = partitionAllocGeneric(genericAllocator.root(), size); 783 ptr = partitionAllocGeneric(genericAllocator.root(), size);
780 size_t actualSize = partitionAllocGetSize(ptr); 784 size_t actualSize = partitionAllocGetSize(ptr);
781 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMa xBucketed + 8 * WTF::kSystemPageSize); 785 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBuck eted + 8 * kSystemPageSize);
782 EXPECT_EQ(ptr, ptr2); 786 EXPECT_EQ(ptr, ptr2);
783 EXPECT_EQ(actualSize - 8 * WTF::kSystemPageSize, partitionAllocGetSize(ptr2) ); 787 EXPECT_EQ(actualSize - 8 * kSystemPageSize, partitionAllocGetSize(ptr2));
784 788
785 // Test that a previously in-place shrunk direct mapped allocation can be 789 // Test that a previously in-place shrunk direct mapped allocation can be
786 // expanded up again within its original size. 790 // expanded up again within its original size.
787 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - WTF::kSy stemPageSize); 791 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - kSystemP ageSize);
788 EXPECT_EQ(ptr2, ptr); 792 EXPECT_EQ(ptr2, ptr);
789 EXPECT_EQ(actualSize - WTF::kSystemPageSize, partitionAllocGetSize(ptr)); 793 EXPECT_EQ(actualSize - kSystemPageSize, partitionAllocGetSize(ptr));
790 794
791 // Test that a direct mapped allocation is performed not in-place when the 795 // Test that a direct mapped allocation is performed not in-place when the
792 // new size is small enough. 796 // new size is small enough.
793 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kSystemPag eSize); 797 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kSystemPageSize );
794 EXPECT_NE(ptr, ptr2); 798 EXPECT_NE(ptr, ptr2);
795 799
796 partitionFreeGeneric(genericAllocator.root(), ptr2); 800 partitionFreeGeneric(genericAllocator.root(), ptr2);
797 801
798 TestShutdown(); 802 TestShutdown();
799 } 803 }
800 804
801 // Tests the handing out of freelists for partial pages. 805 // Tests the handing out of freelists for partial pages.
802 TEST(PartitionAllocTest, PartialPageFreelists) 806 TEST(PartitionAllocTest, PartialPageFreelists)
803 { 807 {
804 TestSetup(); 808 TestSetup();
805 809
806 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; 810 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
807 EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExt raAllocSize); 811 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, bigSize + kExtraAllocSiz e);
808 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift; 812 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
809 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; 813 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
810 EXPECT_EQ(0, bucket->emptyPagesHead); 814 EXPECT_EQ(0, bucket->emptyPagesHead);
811 815
812 void* ptr = partitionAlloc(allocator.root(), bigSize); 816 void* ptr = partitionAlloc(allocator.root(), bigSize);
813 EXPECT_TRUE(ptr); 817 EXPECT_TRUE(ptr);
814 818
815 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 819 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
816 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemP ageSize) / (bigSize + kExtraAllocSize); 820 size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSi ze) / (bigSize + kExtraAllocSize);
817 EXPECT_EQ(4u, totalSlots); 821 EXPECT_EQ(4u, totalSlots);
818 // The freelist should have one entry, because we were able to exactly fit 822 // The freelist should have one entry, because we were able to exactly fit
819 // one object slot and one freelist pointer (the null that the head points 823 // one object slot and one freelist pointer (the null that the head points
820 // to) into a system page. 824 // to) into a system page.
821 EXPECT_TRUE(page->freelistHead); 825 EXPECT_TRUE(page->freelistHead);
822 EXPECT_EQ(1, page->numAllocatedSlots); 826 EXPECT_EQ(1, page->numAllocatedSlots);
823 EXPECT_EQ(2, page->numUnprovisionedSlots); 827 EXPECT_EQ(2, page->numUnprovisionedSlots);
824 828
825 void* ptr2 = partitionAlloc(allocator.root(), bigSize); 829 void* ptr2 = partitionAlloc(allocator.root(), bigSize);
826 EXPECT_TRUE(ptr2); 830 EXPECT_TRUE(ptr2);
827 EXPECT_FALSE(page->freelistHead); 831 EXPECT_FALSE(page->freelistHead);
828 EXPECT_EQ(2, page->numAllocatedSlots); 832 EXPECT_EQ(2, page->numAllocatedSlots);
829 EXPECT_EQ(2, page->numUnprovisionedSlots); 833 EXPECT_EQ(2, page->numUnprovisionedSlots);
830 834
831 void* ptr3 = partitionAlloc(allocator.root(), bigSize); 835 void* ptr3 = partitionAlloc(allocator.root(), bigSize);
832 EXPECT_TRUE(ptr3); 836 EXPECT_TRUE(ptr3);
833 EXPECT_TRUE(page->freelistHead); 837 EXPECT_TRUE(page->freelistHead);
834 EXPECT_EQ(3, page->numAllocatedSlots); 838 EXPECT_EQ(3, page->numAllocatedSlots);
835 EXPECT_EQ(0, page->numUnprovisionedSlots); 839 EXPECT_EQ(0, page->numUnprovisionedSlots);
836 840
837 void* ptr4 = partitionAlloc(allocator.root(), bigSize); 841 void* ptr4 = partitionAlloc(allocator.root(), bigSize);
838 EXPECT_TRUE(ptr4); 842 EXPECT_TRUE(ptr4);
839 EXPECT_FALSE(page->freelistHead); 843 EXPECT_FALSE(page->freelistHead);
840 EXPECT_EQ(4, page->numAllocatedSlots); 844 EXPECT_EQ(4, page->numAllocatedSlots);
841 EXPECT_EQ(0, page->numUnprovisionedSlots); 845 EXPECT_EQ(0, page->numUnprovisionedSlots);
842 846
843 void* ptr5 = partitionAlloc(allocator.root(), bigSize); 847 void* ptr5 = partitionAlloc(allocator.root(), bigSize);
844 EXPECT_TRUE(ptr5); 848 EXPECT_TRUE(ptr5);
845 849
846 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookie FreePointerAdjust(ptr5)); 850 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdju st(ptr5));
847 EXPECT_EQ(1, page2->numAllocatedSlots); 851 EXPECT_EQ(1, page2->numAllocatedSlots);
848 852
849 // Churn things a little whilst there's a partial page freelist. 853 // Churn things a little whilst there's a partial page freelist.
850 partitionFree(ptr); 854 partitionFree(ptr);
851 ptr = partitionAlloc(allocator.root(), bigSize); 855 ptr = partitionAlloc(allocator.root(), bigSize);
852 void* ptr6 = partitionAlloc(allocator.root(), bigSize); 856 void* ptr6 = partitionAlloc(allocator.root(), bigSize);
853 857
854 partitionFree(ptr); 858 partitionFree(ptr);
855 partitionFree(ptr2); 859 partitionFree(ptr2);
856 partitionFree(ptr3); 860 partitionFree(ptr3);
857 partitionFree(ptr4); 861 partitionFree(ptr4);
858 partitionFree(ptr5); 862 partitionFree(ptr5);
859 partitionFree(ptr6); 863 partitionFree(ptr6);
860 EXPECT_NE(-1, page->emptyCacheIndex); 864 EXPECT_NE(-1, page->emptyCacheIndex);
861 EXPECT_NE(-1, page2->emptyCacheIndex); 865 EXPECT_NE(-1, page2->emptyCacheIndex);
862 EXPECT_TRUE(page2->freelistHead); 866 EXPECT_TRUE(page2->freelistHead);
863 EXPECT_EQ(0, page2->numAllocatedSlots); 867 EXPECT_EQ(0, page2->numAllocatedSlots);
864 868
865 // And test a couple of sizes that do not cross kSystemPageSize with a singl e allocation. 869 // And test a couple of sizes that do not cross kSystemPageSize with a singl e allocation.
866 size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize; 870 size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
867 bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift; 871 bucketIdx = (mediumSize + kExtraAllocSize) >> kBucketShift;
868 bucket = &allocator.root()->buckets()[bucketIdx]; 872 bucket = &allocator.root()->buckets()[bucketIdx];
869 EXPECT_EQ(0, bucket->emptyPagesHead); 873 EXPECT_EQ(0, bucket->emptyPagesHead);
870 874
871 ptr = partitionAlloc(allocator.root(), mediumSize); 875 ptr = partitionAlloc(allocator.root(), mediumSize);
872 EXPECT_TRUE(ptr); 876 EXPECT_TRUE(ptr);
873 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 877 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
874 EXPECT_EQ(1, page->numAllocatedSlots); 878 EXPECT_EQ(1, page->numAllocatedSlots);
875 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize ) / (mediumSize + kExtraAllocSize); 879 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / ( mediumSize + kExtraAllocSize);
876 size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize ); 880 size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize);
877 EXPECT_EQ(2u, firstPageSlots); 881 EXPECT_EQ(2u, firstPageSlots);
878 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); 882 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
879 883
880 partitionFree(ptr); 884 partitionFree(ptr);
881 885
882 size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize; 886 size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
883 bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift; 887 bucketIdx = (smallSize + kExtraAllocSize) >> kBucketShift;
884 bucket = &allocator.root()->buckets()[bucketIdx]; 888 bucket = &allocator.root()->buckets()[bucketIdx];
885 EXPECT_EQ(0, bucket->emptyPagesHead); 889 EXPECT_EQ(0, bucket->emptyPagesHead);
886 890
887 ptr = partitionAlloc(allocator.root(), smallSize); 891 ptr = partitionAlloc(allocator.root(), smallSize);
888 EXPECT_TRUE(ptr); 892 EXPECT_TRUE(ptr);
889 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 893 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
890 EXPECT_EQ(1, page->numAllocatedSlots); 894 EXPECT_EQ(1, page->numAllocatedSlots);
891 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize ) / (smallSize + kExtraAllocSize); 895 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / ( smallSize + kExtraAllocSize);
892 firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize); 896 firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize);
893 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); 897 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
894 898
895 partitionFree(ptr); 899 partitionFree(ptr);
896 EXPECT_TRUE(page->freelistHead); 900 EXPECT_TRUE(page->freelistHead);
897 EXPECT_EQ(0, page->numAllocatedSlots); 901 EXPECT_EQ(0, page->numAllocatedSlots);
898 902
899 size_t verySmallSize = 32 - kExtraAllocSize; 903 size_t verySmallSize = 32 - kExtraAllocSize;
900 bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift; 904 bucketIdx = (verySmallSize + kExtraAllocSize) >> kBucketShift;
901 bucket = &allocator.root()->buckets()[bucketIdx]; 905 bucket = &allocator.root()->buckets()[bucketIdx];
902 EXPECT_EQ(0, bucket->emptyPagesHead); 906 EXPECT_EQ(0, bucket->emptyPagesHead);
903 907
904 ptr = partitionAlloc(allocator.root(), verySmallSize); 908 ptr = partitionAlloc(allocator.root(), verySmallSize);
905 EXPECT_TRUE(ptr); 909 EXPECT_TRUE(ptr);
906 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 910 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
907 EXPECT_EQ(1, page->numAllocatedSlots); 911 EXPECT_EQ(1, page->numAllocatedSlots);
908 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize ) / (verySmallSize + kExtraAllocSize); 912 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / ( verySmallSize + kExtraAllocSize);
909 firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize); 913 firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
910 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); 914 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);
911 915
912 partitionFree(ptr); 916 partitionFree(ptr);
913 EXPECT_TRUE(page->freelistHead); 917 EXPECT_TRUE(page->freelistHead);
914 EXPECT_EQ(0, page->numAllocatedSlots); 918 EXPECT_EQ(0, page->numAllocatedSlots);
915 919
916 // And try an allocation size (against the generic allocator) that is 920 // And try an allocation size (against the generic allocator) that is
917 // larger than a system page. 921 // larger than a system page.
918 size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2) ) - kExtraAllocSize; 922 size_t pageAndAHalfSize = (kSystemPageSize + (kSystemPageSize / 2)) - kExtra AllocSize;
919 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize); 923 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
920 EXPECT_TRUE(ptr); 924 EXPECT_TRUE(ptr);
921 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 925 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
922 EXPECT_EQ(1, page->numAllocatedSlots); 926 EXPECT_EQ(1, page->numAllocatedSlots);
923 EXPECT_TRUE(page->freelistHead); 927 EXPECT_TRUE(page->freelistHead);
924 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize ) / (pageAndAHalfSize + kExtraAllocSize); 928 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / ( pageAndAHalfSize + kExtraAllocSize);
925 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots); 929 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
926 partitionFreeGeneric(genericAllocator.root(), ptr); 930 partitionFreeGeneric(genericAllocator.root(), ptr);
927 931
928 // And then make sure than exactly the page size only faults one page. 932 // And then make sure than exactly the page size only faults one page.
929 size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize; 933 size_t pageSize = kSystemPageSize - kExtraAllocSize;
930 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize); 934 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
931 EXPECT_TRUE(ptr); 935 EXPECT_TRUE(ptr);
932 page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr )); 936 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
933 EXPECT_EQ(1, page->numAllocatedSlots); 937 EXPECT_EQ(1, page->numAllocatedSlots);
934 EXPECT_FALSE(page->freelistHead); 938 EXPECT_FALSE(page->freelistHead);
935 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize ) / (pageSize + kExtraAllocSize); 939 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / ( pageSize + kExtraAllocSize);
936 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots); 940 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
937 partitionFreeGeneric(genericAllocator.root(), ptr); 941 partitionFreeGeneric(genericAllocator.root(), ptr);
938 942
939 TestShutdown(); 943 TestShutdown();
940 } 944 }
941 945
942 // Test some of the fragmentation-resistant properties of the allocator. 946 // Test some of the fragmentation-resistant properties of the allocator.
943 TEST(PartitionAllocTest, PageRefilling) 947 TEST(PartitionAllocTest, PageRefilling)
944 { 948 {
945 TestSetup(); 949 TestSetup();
946 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex ]; 950 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
947 951
948 // Grab two full pages and a non-full page. 952 // Grab two full pages and a non-full page.
949 WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize); 953 PartitionPage* page1 = GetFullPage(kTestAllocSize);
950 WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize); 954 PartitionPage* page2 = GetFullPage(kTestAllocSize);
951 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize); 955 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
952 EXPECT_TRUE(ptr); 956 EXPECT_TRUE(ptr);
953 EXPECT_NE(page1, bucket->activePagesHead); 957 EXPECT_NE(page1, bucket->activePagesHead);
954 EXPECT_NE(page2, bucket->activePagesHead); 958 EXPECT_NE(page2, bucket->activePagesHead);
955 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 959 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
956 EXPECT_EQ(1, page->numAllocatedSlots); 960 EXPECT_EQ(1, page->numAllocatedSlots);
957 961
958 // Work out a pointer into page2 and free it; and then page1 and free it. 962 // Work out a pointer into page2 and free it; and then page1 and free it.
959 char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + k PointerOffset; 963 char* ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPoint erOffset;
960 partitionFree(ptr2); 964 partitionFree(ptr2);
961 ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointe rOffset; 965 ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffs et;
962 partitionFree(ptr2); 966 partitionFree(ptr2);
963 967
964 // If we perform two allocations from the same bucket now, we expect to 968 // If we perform two allocations from the same bucket now, we expect to
965 // refill both the nearly full pages. 969 // refill both the nearly full pages.
966 (void) partitionAlloc(allocator.root(), kTestAllocSize); 970 (void) partitionAlloc(allocator.root(), kTestAllocSize);
967 (void) partitionAlloc(allocator.root(), kTestAllocSize); 971 (void) partitionAlloc(allocator.root(), kTestAllocSize);
968 EXPECT_EQ(1, page->numAllocatedSlots); 972 EXPECT_EQ(1, page->numAllocatedSlots);
969 973
970 FreeFullPage(page2); 974 FreeFullPage(page2);
971 FreeFullPage(page1); 975 FreeFullPage(page1);
972 partitionFree(ptr); 976 partitionFree(ptr);
973 977
974 TestShutdown(); 978 TestShutdown();
975 } 979 }
976 980
977 // Basic tests to ensure that allocations work for partial page buckets. 981 // Basic tests to ensure that allocations work for partial page buckets.
978 TEST(PartitionAllocTest, PartialPages) 982 TEST(PartitionAllocTest, PartialPages)
979 { 983 {
980 TestSetup(); 984 TestSetup();
981 985
982 // Find a size that is backed by a partial partition page. 986 // Find a size that is backed by a partial partition page.
983 size_t size = sizeof(void*); 987 size_t size = sizeof(void*);
984 WTF::PartitionBucket* bucket = 0; 988 PartitionBucket* bucket = 0;
985 while (size < kTestMaxAllocation) { 989 while (size < kTestMaxAllocation) {
986 bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift]; 990 bucket = &allocator.root()->buckets()[size >> kBucketShift];
987 if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartition Page) 991 if (bucket->numSystemPagesPerSlotSpan % kNumSystemPagesPerPartitionPage)
988 break; 992 break;
989 size += sizeof(void*); 993 size += sizeof(void*);
990 } 994 }
991 EXPECT_LT(size, kTestMaxAllocation); 995 EXPECT_LT(size, kTestMaxAllocation);
992 996
993 WTF::PartitionPage* page1 = GetFullPage(size); 997 PartitionPage* page1 = GetFullPage(size);
994 WTF::PartitionPage* page2 = GetFullPage(size); 998 PartitionPage* page2 = GetFullPage(size);
995 FreeFullPage(page2); 999 FreeFullPage(page2);
996 FreeFullPage(page1); 1000 FreeFullPage(page1);
997 1001
998 TestShutdown(); 1002 TestShutdown();
999 } 1003 }
1000 1004
1001 // Test correct handling if our mapping collides with another. 1005 // Test correct handling if our mapping collides with another.
1002 TEST(PartitionAllocTest, MappingCollision) 1006 TEST(PartitionAllocTest, MappingCollision)
1003 { 1007 {
1004 TestSetup(); 1008 TestSetup();
1005 // The -2 is because the first and last partition pages in a super page are 1009 // The -2 is because the first and last partition pages in a super page are
1006 // guard pages. 1010 // guard pages.
1007 size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2; 1011 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
1008 OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::P artitionPage*[numPartitionPagesNeeded]); 1012 OwnPtr<PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new PartitionPa ge*[numPartitionPagesNeeded]);
1009 OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF:: PartitionPage*[numPartitionPagesNeeded]); 1013 OwnPtr<PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new PartitionP age*[numPartitionPagesNeeded]);
1010 1014
1011 size_t i; 1015 size_t i;
1012 for (i = 0; i < numPartitionPagesNeeded; ++i) 1016 for (i = 0; i < numPartitionPagesNeeded; ++i)
1013 firstSuperPagePages[i] = GetFullPage(kTestAllocSize); 1017 firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
1014 1018
1015 char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSu perPagePages[0])); 1019 char* pageBase = reinterpret_cast<char*>(partitionPageToPointer(firstSuperPa gePages[0]));
1016 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & W TF::kSuperPageOffsetMask); 1020 EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuper PageOffsetMask);
1017 pageBase -= WTF::kPartitionPageSize; 1021 pageBase -= kPartitionPageSize;
1018 // Map a single system page either side of the mapping for our allocations, 1022 // Map a single system page either side of the mapping for our allocations,
1019 // with the goal of tripping up alignment of the next mapping. 1023 // with the goal of tripping up alignment of the next mapping.
1020 void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF ::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::PageInaccess ible); 1024 void* map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocati onGranularity, kPageAllocationGranularity, PageInaccessible);
1021 EXPECT_TRUE(map1); 1025 EXPECT_TRUE(map1);
1022 void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAlloc ationGranularity, WTF::kPageAllocationGranularity, WTF::PageInaccessible); 1026 void* map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularit y, kPageAllocationGranularity, PageInaccessible);
1023 EXPECT_TRUE(map2); 1027 EXPECT_TRUE(map2);
1024 1028
1025 for (i = 0; i < numPartitionPagesNeeded; ++i) 1029 for (i = 0; i < numPartitionPagesNeeded; ++i)
1026 secondSuperPagePages[i] = GetFullPage(kTestAllocSize); 1030 secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
1027 1031
1028 WTF::freePages(map1, WTF::kPageAllocationGranularity); 1032 freePages(map1, kPageAllocationGranularity);
1029 WTF::freePages(map2, WTF::kPageAllocationGranularity); 1033 freePages(map2, kPageAllocationGranularity);
1030 1034
1031 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePag es[0])); 1035 pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePag es[0]));
1032 EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & W TF::kSuperPageOffsetMask); 1036 EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuper PageOffsetMask);
1033 pageBase -= WTF::kPartitionPageSize; 1037 pageBase -= kPartitionPageSize;
1034 // Map a single system page either side of the mapping for our allocations, 1038 // Map a single system page either side of the mapping for our allocations,
1035 // with the goal of tripping up alignment of the next mapping. 1039 // with the goal of tripping up alignment of the next mapping.
1036 map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPag eAllocationGranularity, WTF::kPageAllocationGranularity, WTF::PageAccessible); 1040 map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocationGran ularity, kPageAllocationGranularity, PageAccessible);
1037 EXPECT_TRUE(map1); 1041 EXPECT_TRUE(map1);
1038 map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationG ranularity, WTF::kPageAllocationGranularity, WTF::PageAccessible); 1042 map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPa geAllocationGranularity, PageAccessible);
1039 EXPECT_TRUE(map2); 1043 EXPECT_TRUE(map2);
1040 WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity); 1044 setSystemPagesInaccessible(map1, kPageAllocationGranularity);
1041 WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity); 1045 setSystemPagesInaccessible(map2, kPageAllocationGranularity);
1042 1046
1043 WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize); 1047 PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
1044 WTF::freePages(map1, WTF::kPageAllocationGranularity); 1048 freePages(map1, kPageAllocationGranularity);
1045 WTF::freePages(map2, WTF::kPageAllocationGranularity); 1049 freePages(map2, kPageAllocationGranularity);
1046 1050
1047 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThird SuperPage)) & WTF::kPartitionPageOffsetMask); 1051 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThird SuperPage)) & kPartitionPageOffsetMask);
1048 1052
1049 // And make sure we really did get a page in a new superpage. 1053 // And make sure we really did get a page in a new superpage.
1050 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPageP ages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageTo Pointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask); 1054 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPageP ages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPoint er(pageInThirdSuperPage)) & kSuperPageBaseMask);
1051 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPage Pages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageT oPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask); 1055 EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPage Pages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPoin ter(pageInThirdSuperPage)) & kSuperPageBaseMask);
1052 1056
1053 FreeFullPage(pageInThirdSuperPage); 1057 FreeFullPage(pageInThirdSuperPage);
1054 for (i = 0; i < numPartitionPagesNeeded; ++i) { 1058 for (i = 0; i < numPartitionPagesNeeded; ++i) {
1055 FreeFullPage(firstSuperPagePages[i]); 1059 FreeFullPage(firstSuperPagePages[i]);
1056 FreeFullPage(secondSuperPagePages[i]); 1060 FreeFullPage(secondSuperPagePages[i]);
1057 } 1061 }
1058 1062
1059 TestShutdown(); 1063 TestShutdown();
1060 } 1064 }
1061 1065
1062 // Tests that pages in the free page cache do get freed as appropriate. 1066 // Tests that pages in the free page cache do get freed as appropriate.
1063 TEST(PartitionAllocTest, FreeCache) 1067 TEST(PartitionAllocTest, FreeCache)
1064 { 1068 {
1065 TestSetup(); 1069 TestSetup();
1066 1070
1067 EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages); 1071 EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);
1068 1072
1069 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; 1073 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
1070 size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift; 1074 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
1071 WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; 1075 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
1072 1076
1073 void* ptr = partitionAlloc(allocator.root(), bigSize); 1077 void* ptr = partitionAlloc(allocator.root(), bigSize);
1074 EXPECT_TRUE(ptr); 1078 EXPECT_TRUE(ptr);
1075 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 1079 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
1076 EXPECT_EQ(0, bucket->emptyPagesHead); 1080 EXPECT_EQ(0, bucket->emptyPagesHead);
1077 EXPECT_EQ(1, page->numAllocatedSlots); 1081 EXPECT_EQ(1, page->numAllocatedSlots);
1078 EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPag es); 1082 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
1079 partitionFree(ptr); 1083 partitionFree(ptr);
1080 EXPECT_EQ(0, page->numAllocatedSlots); 1084 EXPECT_EQ(0, page->numAllocatedSlots);
1081 EXPECT_NE(-1, page->emptyCacheIndex); 1085 EXPECT_NE(-1, page->emptyCacheIndex);
1082 EXPECT_TRUE(page->freelistHead); 1086 EXPECT_TRUE(page->freelistHead);
1083 1087
1084 CycleFreeCache(kTestAllocSize); 1088 CycleFreeCache(kTestAllocSize);
1085 1089
1086 // Flushing the cache should have really freed the unused page. 1090 // Flushing the cache should have really freed the unused page.
1087 EXPECT_FALSE(page->freelistHead); 1091 EXPECT_FALSE(page->freelistHead);
1088 EXPECT_EQ(-1, page->emptyCacheIndex); 1092 EXPECT_EQ(-1, page->emptyCacheIndex);
1089 EXPECT_EQ(0, page->numAllocatedSlots); 1093 EXPECT_EQ(0, page->numAllocatedSlots);
1090 WTF::PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kT estBucketIndex]; 1094 PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBu cketIndex];
1091 EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * WTF::kSystemPage Size, allocator.root()->totalSizeOfCommittedPages); 1095 EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);
1092 1096
1093 // Check that an allocation works ok whilst in this state (a free'd page 1097 // Check that an allocation works ok whilst in this state (a free'd page
1094 // as the active pages head). 1098 // as the active pages head).
1095 ptr = partitionAlloc(allocator.root(), bigSize); 1099 ptr = partitionAlloc(allocator.root(), bigSize);
1096 EXPECT_FALSE(bucket->emptyPagesHead); 1100 EXPECT_FALSE(bucket->emptyPagesHead);
1097 partitionFree(ptr); 1101 partitionFree(ptr);
1098 1102
1099 // Also check that a page that is bouncing immediately between empty and 1103 // Also check that a page that is bouncing immediately between empty and
1100 // used does not get freed. 1104 // used does not get freed.
1101 for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) { 1105 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
1102 ptr = partitionAlloc(allocator.root(), bigSize); 1106 ptr = partitionAlloc(allocator.root(), bigSize);
1103 EXPECT_TRUE(page->freelistHead); 1107 EXPECT_TRUE(page->freelistHead);
1104 partitionFree(ptr); 1108 partitionFree(ptr);
1105 EXPECT_TRUE(page->freelistHead); 1109 EXPECT_TRUE(page->freelistHead);
1106 } 1110 }
1107 EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPag es); 1111 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
1108 TestShutdown(); 1112 TestShutdown();
1109 } 1113 }
1110 1114
1111 // Tests for a bug we had with losing references to free pages. 1115 // Tests for a bug we had with losing references to free pages.
1112 TEST(PartitionAllocTest, LostFreePagesBug) 1116 TEST(PartitionAllocTest, LostFreePagesBug)
1113 { 1117 {
1114 TestSetup(); 1118 TestSetup();
1115 1119
1116 size_t size = WTF::kPartitionPageSize - kExtraAllocSize; 1120 size_t size = kPartitionPageSize - kExtraAllocSize;
1117 1121
1118 void* ptr = partitionAllocGeneric(genericAllocator.root(), size); 1122 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
1119 EXPECT_TRUE(ptr); 1123 EXPECT_TRUE(ptr);
1120 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size); 1124 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
1121 EXPECT_TRUE(ptr2); 1125 EXPECT_TRUE(ptr2);
1122 1126
1123 WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieF reePointerAdjust(ptr)); 1127 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjus t(ptr));
1124 WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookie FreePointerAdjust(ptr2)); 1128 PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdju st(ptr2));
1125 WTF::PartitionBucket* bucket = page->bucket; 1129 PartitionBucket* bucket = page->bucket;
1126 1130
1127 EXPECT_EQ(0, bucket->emptyPagesHead); 1131 EXPECT_EQ(0, bucket->emptyPagesHead);
1128 EXPECT_EQ(-1, page->numAllocatedSlots); 1132 EXPECT_EQ(-1, page->numAllocatedSlots);
1129 EXPECT_EQ(1, page2->numAllocatedSlots); 1133 EXPECT_EQ(1, page2->numAllocatedSlots);
1130 1134
1131 partitionFreeGeneric(genericAllocator.root(), ptr); 1135 partitionFreeGeneric(genericAllocator.root(), ptr);
1132 partitionFreeGeneric(genericAllocator.root(), ptr2); 1136 partitionFreeGeneric(genericAllocator.root(), ptr2);
1133 1137
1134 EXPECT_EQ(0, bucket->emptyPagesHead); 1138 EXPECT_EQ(0, bucket->emptyPagesHead);
1135 EXPECT_EQ(0, page->numAllocatedSlots); 1139 EXPECT_EQ(0, page->numAllocatedSlots);
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
1188 EXPECT_TRUE(SetAddressSpaceLimit()); 1192 EXPECT_TRUE(SetAddressSpaceLimit());
1189 1193
1190 // 512 kB x 12288 == 6 GB 1194 // 512 kB x 12288 == 6 GB
1191 const size_t blockSize = 512 * 1024; 1195 const size_t blockSize = 512 * 1024;
1192 const int numAllocations = 12288; 1196 const int numAllocations = 12288;
1193 1197
1194 void* ptrs[numAllocations]; 1198 void* ptrs[numAllocations];
1195 int i; 1199 int i;
1196 1200
1197 for (i = 0; i < numAllocations; ++i) { 1201 for (i = 0; i < numAllocations; ++i) {
1198 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::Parti tionAllocReturnNull, blockSize); 1202 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionA llocReturnNull, blockSize);
1199 if (!ptrs[i]) { 1203 if (!ptrs[i]) {
1200 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::P artitionAllocReturnNull, blockSize); 1204 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), Partit ionAllocReturnNull, blockSize);
1201 EXPECT_FALSE(ptrs[i]); 1205 EXPECT_FALSE(ptrs[i]);
1202 break; 1206 break;
1203 } 1207 }
1204 } 1208 }
1205 1209
1206 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then 1210 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
1207 // we're not actually testing anything here. 1211 // we're not actually testing anything here.
1208 EXPECT_LT(i, numAllocations); 1212 EXPECT_LT(i, numAllocations);
1209 1213
1210 // Free, reallocate and free again each block we allocated. We do this to 1214 // Free, reallocate and free again each block we allocated. We do this to
1211 // check that freeing memory also works correctly after a failed allocation. 1215 // check that freeing memory also works correctly after a failed allocation.
1212 for (--i; i >= 0; --i) { 1216 for (--i; i >= 0; --i) {
1213 partitionFreeGeneric(genericAllocator.root(), ptrs[i]); 1217 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
1214 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), WTF::Parti tionAllocReturnNull, blockSize); 1218 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionA llocReturnNull, blockSize);
1215 EXPECT_TRUE(ptrs[i]); 1219 EXPECT_TRUE(ptrs[i]);
1216 partitionFreeGeneric(genericAllocator.root(), ptrs[i]); 1220 partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
1217 } 1221 }
1218 1222
1219 EXPECT_TRUE(ClearAddressSpaceLimit()); 1223 EXPECT_TRUE(ClearAddressSpaceLimit());
1220 1224
1221 TestShutdown(); 1225 TestShutdown();
1222 } 1226 }
1223 1227
1224 #endif // !CPU(64BIT) || OS(POSIX) 1228 #endif // !CPU(64BIT) || OS(POSIX)
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
1272 TestShutdown(); 1276 TestShutdown();
1273 } 1277 }
1274 1278
1275 // Check that guard pages are present where expected. 1279 // Check that guard pages are present where expected.
1276 TEST(PartitionAllocDeathTest, GuardPages) 1280 TEST(PartitionAllocDeathTest, GuardPages)
1277 { 1281 {
1278 TestSetup(); 1282 TestSetup();
1279 1283
1280 // This large size will result in a direct mapped allocation with guard 1284 // This large size will result in a direct mapped allocation with guard
1281 // pages at either end. 1285 // pages at either end.
1282 size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllo cSize; 1286 size_t size = (kGenericMaxBucketed + kSystemPageSize) - kExtraAllocSize;
1283 void* ptr = partitionAllocGeneric(genericAllocator.root(), size); 1287 void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
1284 EXPECT_TRUE(ptr); 1288 EXPECT_TRUE(ptr);
1285 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; 1289 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
1286 1290
1287 EXPECT_DEATH(*(charPtr - 1) = 'A', ""); 1291 EXPECT_DEATH(*(charPtr - 1) = 'A', "");
1288 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); 1292 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
1289 1293
1290 partitionFreeGeneric(genericAllocator.root(), ptr); 1294 partitionFreeGeneric(genericAllocator.root(), ptr);
1291 1295
1292 TestShutdown(); 1296 TestShutdown();
1293 } 1297 }
1294 1298
1295 // Check that a bad free() is caught where the free() refers to an unused 1299 // Check that a bad free() is caught where the free() refers to an unused
1296 // partition page of a large allocation. 1300 // partition page of a large allocation.
1297 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage) 1301 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage)
1298 { 1302 {
1299 TestSetup(); 1303 TestSetup();
1300 1304
1301 // This large size will result in a direct mapped allocation with guard 1305 // This large size will result in a direct mapped allocation with guard
1302 // pages at either end. 1306 // pages at either end.
1303 void* ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kPartitionPa geSize * 2); 1307 void* ptr = partitionAllocGeneric(genericAllocator.root(), kPartitionPageSiz e * 2);
1304 EXPECT_TRUE(ptr); 1308 EXPECT_TRUE(ptr);
1305 char* badPtr = reinterpret_cast<char*>(ptr) + WTF::kPartitionPageSize; 1309 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
1306 1310
1307 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); 1311 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), "");
1308 1312
1309 partitionFreeGeneric(genericAllocator.root(), ptr); 1313 partitionFreeGeneric(genericAllocator.root(), ptr);
1310 1314
1311 TestShutdown(); 1315 TestShutdown();
1312 } 1316 }
1313 1317
1314 #endif // !OS(ANDROID) 1318 #endif // !OS(ANDROID)
1315 1319
(...skipping 12 matching lines...) Expand all
1328 } 1332 }
1329 1333
1330 // This series of tests checks the active -> empty -> decommitted states. 1334 // This series of tests checks the active -> empty -> decommitted states.
1331 { 1335 {
1332 void* genericPtr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize); 1336 void* genericPtr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize);
1333 { 1337 {
1334 MockPartitionStatsDumper mockStatsDumperGeneric; 1338 MockPartitionStatsDumper mockStatsDumperGeneric;
1335 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1339 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1336 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1340 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1337 1341
1338 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(2048); 1342 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(2048);
1339 EXPECT_TRUE(stats); 1343 EXPECT_TRUE(stats);
1340 EXPECT_TRUE(stats->isValid); 1344 EXPECT_TRUE(stats->isValid);
1341 EXPECT_EQ(2048u, stats->bucketSlotSize); 1345 EXPECT_EQ(2048u, stats->bucketSlotSize);
1342 EXPECT_EQ(2048u, stats->activeBytes); 1346 EXPECT_EQ(2048u, stats->activeBytes);
1343 EXPECT_EQ(WTF::kSystemPageSize, stats->residentBytes); 1347 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1344 EXPECT_EQ(0u, stats->freeableBytes); 1348 EXPECT_EQ(0u, stats->freeableBytes);
1345 EXPECT_EQ(0u, stats->numFullPages); 1349 EXPECT_EQ(0u, stats->numFullPages);
1346 EXPECT_EQ(1u, stats->numActivePages); 1350 EXPECT_EQ(1u, stats->numActivePages);
1347 EXPECT_EQ(0u, stats->numEmptyPages); 1351 EXPECT_EQ(0u, stats->numEmptyPages);
1348 EXPECT_EQ(0u, stats->numDecommittedPages); 1352 EXPECT_EQ(0u, stats->numDecommittedPages);
1349 } 1353 }
1350 1354
1351 partitionFreeGeneric(genericAllocator.root(), genericPtr); 1355 partitionFreeGeneric(genericAllocator.root(), genericPtr);
1352 1356
1353 { 1357 {
1354 MockPartitionStatsDumper mockStatsDumperGeneric; 1358 MockPartitionStatsDumper mockStatsDumperGeneric;
1355 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1359 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1356 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1360 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1357 1361
1358 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(2048); 1362 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(2048);
1359 EXPECT_TRUE(stats); 1363 EXPECT_TRUE(stats);
1360 EXPECT_TRUE(stats->isValid); 1364 EXPECT_TRUE(stats->isValid);
1361 EXPECT_EQ(2048u, stats->bucketSlotSize); 1365 EXPECT_EQ(2048u, stats->bucketSlotSize);
1362 EXPECT_EQ(0u, stats->activeBytes); 1366 EXPECT_EQ(0u, stats->activeBytes);
1363 EXPECT_EQ(WTF::kSystemPageSize, stats->residentBytes); 1367 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1364 EXPECT_EQ(WTF::kSystemPageSize, stats->freeableBytes); 1368 EXPECT_EQ(kSystemPageSize, stats->freeableBytes);
1365 EXPECT_EQ(0u, stats->numFullPages); 1369 EXPECT_EQ(0u, stats->numFullPages);
1366 EXPECT_EQ(0u, stats->numActivePages); 1370 EXPECT_EQ(0u, stats->numActivePages);
1367 EXPECT_EQ(1u, stats->numEmptyPages); 1371 EXPECT_EQ(1u, stats->numEmptyPages);
1368 EXPECT_EQ(0u, stats->numDecommittedPages); 1372 EXPECT_EQ(0u, stats->numDecommittedPages);
1369 } 1373 }
1370 1374
1371 CycleGenericFreeCache(kTestAllocSize); 1375 CycleGenericFreeCache(kTestAllocSize);
1372 1376
1373 { 1377 {
1374 MockPartitionStatsDumper mockStatsDumperGeneric; 1378 MockPartitionStatsDumper mockStatsDumperGeneric;
1375 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1379 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1376 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1380 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1377 1381
1378 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(2048); 1382 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(2048);
1379 EXPECT_TRUE(stats); 1383 EXPECT_TRUE(stats);
1380 EXPECT_TRUE(stats->isValid); 1384 EXPECT_TRUE(stats->isValid);
1381 EXPECT_EQ(2048u, stats->bucketSlotSize); 1385 EXPECT_EQ(2048u, stats->bucketSlotSize);
1382 EXPECT_EQ(0u, stats->activeBytes); 1386 EXPECT_EQ(0u, stats->activeBytes);
1383 EXPECT_EQ(0u, stats->residentBytes); 1387 EXPECT_EQ(0u, stats->residentBytes);
1384 EXPECT_EQ(0u, stats->freeableBytes); 1388 EXPECT_EQ(0u, stats->freeableBytes);
1385 EXPECT_EQ(0u, stats->numFullPages); 1389 EXPECT_EQ(0u, stats->numFullPages);
1386 EXPECT_EQ(0u, stats->numActivePages); 1390 EXPECT_EQ(0u, stats->numActivePages);
1387 EXPECT_EQ(0u, stats->numEmptyPages); 1391 EXPECT_EQ(0u, stats->numEmptyPages);
1388 EXPECT_EQ(1u, stats->numDecommittedPages); 1392 EXPECT_EQ(1u, stats->numDecommittedPages);
1389 } 1393 }
1390 } 1394 }
1391 1395
1392 // This test checks for correct empty page list accounting. 1396 // This test checks for correct empty page list accounting.
1393 { 1397 {
1394 size_t size = WTF::kPartitionPageSize - kExtraAllocSize; 1398 size_t size = kPartitionPageSize - kExtraAllocSize;
1395 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size); 1399 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size);
1396 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size); 1400 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
1397 partitionFreeGeneric(genericAllocator.root(), ptr1); 1401 partitionFreeGeneric(genericAllocator.root(), ptr1);
1398 partitionFreeGeneric(genericAllocator.root(), ptr2); 1402 partitionFreeGeneric(genericAllocator.root(), ptr2);
1399 1403
1400 CycleGenericFreeCache(kTestAllocSize); 1404 CycleGenericFreeCache(kTestAllocSize);
1401 1405
1402 ptr1 = partitionAllocGeneric(genericAllocator.root(), size); 1406 ptr1 = partitionAllocGeneric(genericAllocator.root(), size);
1403 1407
1404 { 1408 {
1405 MockPartitionStatsDumper mockStatsDumperGeneric; 1409 MockPartitionStatsDumper mockStatsDumperGeneric;
1406 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1410 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1407 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1411 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1408 1412
1409 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(WTF::kPartitionPageSize); 1413 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(kPartitionPageSize);
1410 EXPECT_TRUE(stats); 1414 EXPECT_TRUE(stats);
1411 EXPECT_TRUE(stats->isValid); 1415 EXPECT_TRUE(stats->isValid);
1412 EXPECT_EQ(WTF::kPartitionPageSize, stats->bucketSlotSize); 1416 EXPECT_EQ(kPartitionPageSize, stats->bucketSlotSize);
1413 EXPECT_EQ(WTF::kPartitionPageSize, stats->activeBytes); 1417 EXPECT_EQ(kPartitionPageSize, stats->activeBytes);
1414 EXPECT_EQ(WTF::kPartitionPageSize, stats->residentBytes); 1418 EXPECT_EQ(kPartitionPageSize, stats->residentBytes);
1415 EXPECT_EQ(0u, stats->freeableBytes); 1419 EXPECT_EQ(0u, stats->freeableBytes);
1416 EXPECT_EQ(1u, stats->numFullPages); 1420 EXPECT_EQ(1u, stats->numFullPages);
1417 EXPECT_EQ(0u, stats->numActivePages); 1421 EXPECT_EQ(0u, stats->numActivePages);
1418 EXPECT_EQ(0u, stats->numEmptyPages); 1422 EXPECT_EQ(0u, stats->numEmptyPages);
1419 EXPECT_EQ(1u, stats->numDecommittedPages); 1423 EXPECT_EQ(1u, stats->numDecommittedPages);
1420 } 1424 }
1421 partitionFreeGeneric(genericAllocator.root(), ptr1); 1425 partitionFreeGeneric(genericAllocator.root(), ptr1);
1422 } 1426 }
1423 1427
1424 // This test checks for correct direct mapped accounting. 1428 // This test checks for correct direct mapped accounting.
1425 { 1429 {
1426 size_t sizeSmaller = WTF::kGenericMaxBucketed + 1; 1430 size_t sizeSmaller = kGenericMaxBucketed + 1;
1427 size_t sizeBigger = (WTF::kGenericMaxBucketed * 2) + 1; 1431 size_t sizeBigger = (kGenericMaxBucketed * 2) + 1;
1428 size_t realSizeSmaller = (sizeSmaller + WTF::kSystemPageOffsetMask) & WT F::kSystemPageBaseMask; 1432 size_t realSizeSmaller = (sizeSmaller + kSystemPageOffsetMask) & kSystem PageBaseMask;
1429 size_t realSizeBigger = (sizeBigger + WTF::kSystemPageOffsetMask) & WTF: :kSystemPageBaseMask; 1433 size_t realSizeBigger = (sizeBigger + kSystemPageOffsetMask) & kSystemPa geBaseMask;
1430 void* ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller); 1434 void* ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller);
1431 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger); 1435 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger);
1432 1436
1433 { 1437 {
1434 MockPartitionStatsDumper mockStatsDumperGeneric; 1438 MockPartitionStatsDumper mockStatsDumperGeneric;
1435 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1439 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1436 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1440 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1437 1441
1438 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(realSizeSmaller); 1442 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(realSizeSmaller);
1439 EXPECT_TRUE(stats); 1443 EXPECT_TRUE(stats);
1440 EXPECT_TRUE(stats->isValid); 1444 EXPECT_TRUE(stats->isValid);
1441 EXPECT_TRUE(stats->isDirectMap); 1445 EXPECT_TRUE(stats->isDirectMap);
1442 EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize); 1446 EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize);
1443 EXPECT_EQ(realSizeSmaller, stats->activeBytes); 1447 EXPECT_EQ(realSizeSmaller, stats->activeBytes);
1444 EXPECT_EQ(realSizeSmaller, stats->residentBytes); 1448 EXPECT_EQ(realSizeSmaller, stats->residentBytes);
1445 EXPECT_EQ(0u, stats->freeableBytes); 1449 EXPECT_EQ(0u, stats->freeableBytes);
1446 EXPECT_EQ(1u, stats->numFullPages); 1450 EXPECT_EQ(1u, stats->numFullPages);
1447 EXPECT_EQ(0u, stats->numActivePages); 1451 EXPECT_EQ(0u, stats->numActivePages);
1448 EXPECT_EQ(0u, stats->numEmptyPages); 1452 EXPECT_EQ(0u, stats->numEmptyPages);
(...skipping 26 matching lines...) Expand all
1475 1479
1476 // This test checks large-but-not-quite-direct allocations. 1480 // This test checks large-but-not-quite-direct allocations.
1477 { 1481 {
1478 void* ptr = partitionAllocGeneric(genericAllocator.root(), 65537); 1482 void* ptr = partitionAllocGeneric(genericAllocator.root(), 65537);
1479 1483
1480 { 1484 {
1481 MockPartitionStatsDumper mockStatsDumperGeneric; 1485 MockPartitionStatsDumper mockStatsDumperGeneric;
1482 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric); 1486 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_all ocator", &mockStatsDumperGeneric);
1483 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1487 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1484 1488
1485 size_t slotSize = 65536 + (65536 / WTF::kGenericNumBucketsPerOrder); 1489 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
1486 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneri c.GetBucketStats(slotSize); 1490 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Get BucketStats(slotSize);
1487 EXPECT_TRUE(stats); 1491 EXPECT_TRUE(stats);
1488 EXPECT_TRUE(stats->isValid); 1492 EXPECT_TRUE(stats->isValid);
1489 EXPECT_FALSE(stats->isDirectMap); 1493 EXPECT_FALSE(stats->isDirectMap);
1490 EXPECT_EQ(slotSize, stats->bucketSlotSize); 1494 EXPECT_EQ(slotSize, stats->bucketSlotSize);
1491 EXPECT_EQ(65536 + WTF::kSystemPageSize, stats->activeBytes); 1495 EXPECT_EQ(65536 + kSystemPageSize, stats->activeBytes);
1492 EXPECT_EQ(slotSize, stats->residentBytes); 1496 EXPECT_EQ(slotSize, stats->residentBytes);
1493 EXPECT_EQ(0u, stats->freeableBytes); 1497 EXPECT_EQ(0u, stats->freeableBytes);
1494 EXPECT_EQ(1u, stats->numFullPages); 1498 EXPECT_EQ(1u, stats->numFullPages);
1495 EXPECT_EQ(0u, stats->numActivePages); 1499 EXPECT_EQ(0u, stats->numActivePages);
1496 EXPECT_EQ(0u, stats->numEmptyPages); 1500 EXPECT_EQ(0u, stats->numEmptyPages);
1497 EXPECT_EQ(0u, stats->numDecommittedPages); 1501 EXPECT_EQ(0u, stats->numDecommittedPages);
1498 } 1502 }
1499 1503
1500 partitionFreeGeneric(genericAllocator.root(), ptr); 1504 partitionFreeGeneric(genericAllocator.root(), ptr);
1501 } 1505 }
1502 1506
1503 TestShutdown(); 1507 TestShutdown();
1504 } 1508 }
1505 1509
1506 // Tests the API to purge freeable memory. 1510 // Tests the API to purge freeable memory.
1507 TEST(PartitionAllocTest, Purge) 1511 TEST(PartitionAllocTest, Purge)
1508 { 1512 {
1509 TestSetup(); 1513 TestSetup();
1510 1514
1511 void* ptr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllo cSize); 1515 void* ptr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllo cSize);
1512 partitionFreeGeneric(genericAllocator.root(), ptr); 1516 partitionFreeGeneric(genericAllocator.root(), ptr);
1513 { 1517 {
1514 MockPartitionStatsDumper mockStatsDumperGeneric; 1518 MockPartitionStatsDumper mockStatsDumperGeneric;
1515 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocat or", &mockStatsDumperGeneric); 1519 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocat or", &mockStatsDumperGeneric);
1516 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1520 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1517 1521
1518 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Ge tBucketStats(2048); 1522 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBuck etStats(2048);
1519 EXPECT_TRUE(stats); 1523 EXPECT_TRUE(stats);
1520 EXPECT_TRUE(stats->isValid); 1524 EXPECT_TRUE(stats->isValid);
1521 EXPECT_EQ(WTF::kSystemPageSize, stats->freeableBytes); 1525 EXPECT_EQ(kSystemPageSize, stats->freeableBytes);
1522 EXPECT_EQ(WTF::kSystemPageSize, stats->residentBytes); 1526 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1523 } 1527 }
1524 partitionPurgeMemoryGeneric(genericAllocator.root()); 1528 partitionPurgeMemoryGeneric(genericAllocator.root());
1525 { 1529 {
1526 MockPartitionStatsDumper mockStatsDumperGeneric; 1530 MockPartitionStatsDumper mockStatsDumperGeneric;
1527 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocat or", &mockStatsDumperGeneric); 1531 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocat or", &mockStatsDumperGeneric);
1528 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); 1532 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1529 1533
1530 const WTF::PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.Ge tBucketStats(2048); 1534 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBuck etStats(2048);
1531 EXPECT_TRUE(stats); 1535 EXPECT_TRUE(stats);
1532 EXPECT_TRUE(stats->isValid); 1536 EXPECT_TRUE(stats->isValid);
1533 EXPECT_EQ(0u, stats->freeableBytes); 1537 EXPECT_EQ(0u, stats->freeableBytes);
1534 EXPECT_EQ(0u, stats->residentBytes); 1538 EXPECT_EQ(0u, stats->residentBytes);
1535 } 1539 }
1536 // Calling purge again here is a good way of testing we didn't mess up the 1540 // Calling purge again here is a good way of testing we didn't mess up the
1537 // state of the free cache ring. 1541 // state of the free cache ring.
1538 partitionPurgeMemoryGeneric(genericAllocator.root()); 1542 partitionPurgeMemoryGeneric(genericAllocator.root());
1539 TestShutdown(); 1543 TestShutdown();
1540 } 1544 }
1541 1545
1542 // Tests that the countLeadingZeros() functions work to our satisfaction. 1546 // Tests that the countLeadingZeros() functions work to our satisfaction.
1543 // It doesn't seem worth the overhead of a whole new file for these tests, so 1547 // It doesn't seem worth the overhead of a whole new file for these tests, so
1544 // we'll put them here since partitionAllocGeneric will depend heavily on these 1548 // we'll put them here since partitionAllocGeneric will depend heavily on these
1545 // functions working correctly. 1549 // functions working correctly.
1546 TEST(PartitionAllocTest, CLZWorks) 1550 TEST(PartitionAllocTest, CLZWorks)
1547 { 1551 {
1548 EXPECT_EQ(32u, WTF::countLeadingZeros32(0u)); 1552 EXPECT_EQ(32u, countLeadingZeros32(0u));
1549 EXPECT_EQ(31u, WTF::countLeadingZeros32(1u)); 1553 EXPECT_EQ(31u, countLeadingZeros32(1u));
1550 EXPECT_EQ(1u, WTF::countLeadingZeros32(1u << 30)); 1554 EXPECT_EQ(1u, countLeadingZeros32(1u << 30));
1551 EXPECT_EQ(0u, WTF::countLeadingZeros32(1u << 31)); 1555 EXPECT_EQ(0u, countLeadingZeros32(1u << 31));
1552 1556
1553 #if CPU(64BIT) 1557 #if CPU(64BIT)
1554 EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull)); 1558 EXPECT_EQ(64u, countLeadingZerosSizet(0ull));
1555 EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull)); 1559 EXPECT_EQ(63u, countLeadingZerosSizet(1ull));
1556 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31)); 1560 EXPECT_EQ(32u, countLeadingZerosSizet(1ull << 31));
1557 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62)); 1561 EXPECT_EQ(1u, countLeadingZerosSizet(1ull << 62));
1558 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63)); 1562 EXPECT_EQ(0u, countLeadingZerosSizet(1ull << 63));
1559 #else 1563 #else
1560 EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0u)); 1564 EXPECT_EQ(32u, countLeadingZerosSizet(0u));
1561 EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1u)); 1565 EXPECT_EQ(31u, countLeadingZerosSizet(1u));
1562 EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1u << 30)); 1566 EXPECT_EQ(1u, countLeadingZerosSizet(1u << 30));
1563 EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1u << 31)); 1567 EXPECT_EQ(0u, countLeadingZerosSizet(1u << 31));
1564 #endif 1568 #endif
1565 } 1569 }
1566 1570
1567 } // namespace 1571 } // namespace WTF
1568 1572
1569 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 1573 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
OLDNEW
« no previous file with comments | « Source/wtf/MathExtrasTest.cpp ('k') | Source/wtf/RefPtrTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698