| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/allocator/partition_allocator/partition_alloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
| 6 | 6 |
| 7 #include <stdlib.h> | 7 #include <stdlib.h> |
| 8 #include <string.h> | 8 #include <string.h> |
| 9 | 9 |
| 10 #include <memory> | 10 #include <memory> |
| (...skipping 22 matching lines...) Expand all Loading... |
| 33 } // namespace | 33 } // namespace |
| 34 | 34 |
| 35 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 35 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 36 | 36 |
| 37 namespace base { | 37 namespace base { |
| 38 | 38 |
| 39 namespace { | 39 namespace { |
| 40 | 40 |
| 41 const size_t kTestMaxAllocation = 4096; | 41 const size_t kTestMaxAllocation = 4096; |
| 42 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; | 42 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; |
| 43 PartitionAllocatorGeneric genericAllocator; | 43 PartitionAllocatorGeneric generic_allocator; |
| 44 | 44 |
| 45 const size_t kTestAllocSize = 16; | 45 const size_t kTestAllocSize = 16; |
| 46 #if !DCHECK_IS_ON() | 46 #if !DCHECK_IS_ON() |
| 47 const size_t kPointerOffset = 0; | 47 const size_t kPointerOffset = 0; |
| 48 const size_t kExtraAllocSize = 0; | 48 const size_t kExtraAllocSize = 0; |
| 49 #else | 49 #else |
| 50 const size_t kPointerOffset = kCookieSize; | 50 const size_t kPointerOffset = kCookieSize; |
| 51 const size_t kExtraAllocSize = kCookieSize * 2; | 51 const size_t kExtraAllocSize = kCookieSize * 2; |
| 52 #endif | 52 #endif |
| 53 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; | 53 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; |
| 54 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; | 54 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; |
| 55 | 55 |
| 56 const char* typeName = nullptr; | 56 const char* type_name = nullptr; |
| 57 | 57 |
| 58 void TestSetup() { | 58 void TestSetup() { |
| 59 allocator.init(); | 59 allocator.init(); |
| 60 genericAllocator.init(); | 60 generic_allocator.init(); |
| 61 } | 61 } |
| 62 | 62 |
| 63 void TestShutdown() { | 63 void TestShutdown() { |
| 64 // We expect no leaks in the general case. We have a test for leak | 64 // We expect no leaks in the general case. We have a test for leak |
| 65 // detection. | 65 // detection. |
| 66 EXPECT_TRUE(allocator.shutdown()); | 66 EXPECT_TRUE(allocator.shutdown()); |
| 67 EXPECT_TRUE(genericAllocator.shutdown()); | 67 EXPECT_TRUE(generic_allocator.shutdown()); |
| 68 } | 68 } |
| 69 | 69 |
| 70 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) | 70 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
| 71 bool SetAddressSpaceLimit() { | 71 bool SetAddressSpaceLimit() { |
| 72 #if !defined(ARCH_CPU_64_BITS) | 72 #if !defined(ARCH_CPU_64_BITS) |
| 73 // 32 bits => address space is limited already. | 73 // 32 bits => address space is limited already. |
| 74 return true; | 74 return true; |
| 75 #elif defined(OS_POSIX) && !defined(OS_MACOSX) | 75 #elif defined(OS_POSIX) && !defined(OS_MACOSX) |
| 76 // Mac will accept RLIMIT_AS changes but it is not enforced. | 76 // Mac will accept RLIMIT_AS changes but it is not enforced. |
| 77 // See https://crbug.com/435269 and rdar://17576114. | 77 // See https://crbug.com/435269 and rdar://17576114. |
| (...skipping 26 matching lines...) Expand all Loading... |
| 104 if (setrlimit(RLIMIT_AS, &limit) != 0) | 104 if (setrlimit(RLIMIT_AS, &limit) != 0) |
| 105 return false; | 105 return false; |
| 106 return true; | 106 return true; |
| 107 #else | 107 #else |
| 108 return false; | 108 return false; |
| 109 #endif | 109 #endif |
| 110 } | 110 } |
| 111 #endif | 111 #endif |
| 112 | 112 |
| 113 PartitionPage* GetFullPage(size_t size) { | 113 PartitionPage* GetFullPage(size_t size) { |
| 114 size_t realSize = size + kExtraAllocSize; | 114 size_t real_size = size + kExtraAllocSize; |
| 115 size_t bucketIdx = realSize >> kBucketShift; | 115 size_t bucket_index = real_size >> kBucketShift; |
| 116 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; | 116 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 117 size_t numSlots = | 117 size_t num_slots = |
| 118 (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / realSize; | 118 (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size; |
| 119 void* first = 0; | 119 void* first = 0; |
| 120 void* last = 0; | 120 void* last = 0; |
| 121 size_t i; | 121 size_t i; |
| 122 for (i = 0; i < numSlots; ++i) { | 122 for (i = 0; i < num_slots; ++i) { |
| 123 void* ptr = partitionAlloc(allocator.root(), size, typeName); | 123 void* ptr = PartitionAlloc(allocator.root(), size, type_name); |
| 124 EXPECT_TRUE(ptr); | 124 EXPECT_TRUE(ptr); |
| 125 if (!i) | 125 if (!i) |
| 126 first = partitionCookieFreePointerAdjust(ptr); | 126 first = PartitionCookieFreePointerAdjust(ptr); |
| 127 else if (i == numSlots - 1) | 127 else if (i == num_slots - 1) |
| 128 last = partitionCookieFreePointerAdjust(ptr); | 128 last = PartitionCookieFreePointerAdjust(ptr); |
| 129 } | 129 } |
| 130 EXPECT_EQ(partitionPointerToPage(first), partitionPointerToPage(last)); | 130 EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last)); |
| 131 if (bucket->numSystemPagesPerSlotSpan == kNumSystemPagesPerPartitionPage) | 131 if (bucket->num_system_pages_per_slot_span == kNumSystemPagesPerPartitionPage) |
| 132 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, | 132 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, |
| 133 reinterpret_cast<size_t>(last) & kPartitionPageBaseMask); | 133 reinterpret_cast<size_t>(last) & kPartitionPageBaseMask); |
| 134 EXPECT_EQ(numSlots, | 134 EXPECT_EQ(num_slots, static_cast<size_t>( |
| 135 static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots)); | 135 bucket->active_pages_head->num_allocated_slots)); |
| 136 EXPECT_EQ(0, bucket->activePagesHead->freelistHead); | 136 EXPECT_EQ(0, bucket->active_pages_head->freelist_head); |
| 137 EXPECT_TRUE(bucket->activePagesHead); | 137 EXPECT_TRUE(bucket->active_pages_head); |
| 138 EXPECT_TRUE(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); | 138 EXPECT_TRUE(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage); |
| 139 return bucket->activePagesHead; | 139 return bucket->active_pages_head; |
| 140 } | 140 } |
| 141 | 141 |
| 142 void FreeFullPage(PartitionPage* page) { | 142 void FreeFullPage(PartitionPage* page) { |
| 143 size_t size = page->bucket->slotSize; | 143 size_t size = page->bucket->slot_size; |
| 144 size_t numSlots = | 144 size_t num_slots = |
| 145 (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / size; | 145 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size; |
| 146 EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots))); | 146 EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots))); |
| 147 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 147 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 148 size_t i; | 148 size_t i; |
| 149 for (i = 0; i < numSlots; ++i) { | 149 for (i = 0; i < num_slots; ++i) { |
| 150 partitionFree(ptr + kPointerOffset); | 150 PartitionFree(ptr + kPointerOffset); |
| 151 ptr += size; | 151 ptr += size; |
| 152 } | 152 } |
| 153 } | 153 } |
| 154 | 154 |
| 155 void CycleFreeCache(size_t size) { | 155 void CycleFreeCache(size_t size) { |
| 156 size_t realSize = size + kExtraAllocSize; | 156 size_t real_size = size + kExtraAllocSize; |
| 157 size_t bucketIdx = realSize >> kBucketShift; | 157 size_t bucket_index = real_size >> kBucketShift; |
| 158 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; | 158 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 159 DCHECK(!bucket->activePagesHead->numAllocatedSlots); | 159 DCHECK(!bucket->active_pages_head->num_allocated_slots); |
| 160 | 160 |
| 161 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 161 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 162 void* ptr = partitionAlloc(allocator.root(), size, typeName); | 162 void* ptr = PartitionAlloc(allocator.root(), size, type_name); |
| 163 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); | 163 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); |
| 164 partitionFree(ptr); | 164 PartitionFree(ptr); |
| 165 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); | 165 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); |
| 166 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); | 166 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); |
| 167 } | 167 } |
| 168 } | 168 } |
| 169 | 169 |
| 170 void CycleGenericFreeCache(size_t size) { | 170 void CycleGenericFreeCache(size_t size) { |
| 171 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 171 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 172 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 172 void* ptr = |
| 173 PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 173 PartitionPage* page = | 174 PartitionPage* page = |
| 174 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 175 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 175 PartitionBucket* bucket = page->bucket; | 176 PartitionBucket* bucket = page->bucket; |
| 176 EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); | 177 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); |
| 177 partitionFreeGeneric(genericAllocator.root(), ptr); | 178 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 178 EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); | 179 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); |
| 179 EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); | 180 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); |
| 180 } | 181 } |
| 181 } | 182 } |
| 182 | 183 |
| 183 void CheckPageInCore(void* ptr, bool inCore) { | 184 void CheckPageInCore(void* ptr, bool inCore) { |
| 184 #if defined(OS_LINUX) | 185 #if defined(OS_LINUX) |
| 185 unsigned char ret; | 186 unsigned char ret; |
| 186 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); | 187 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); |
| 187 EXPECT_EQ(inCore, ret); | 188 EXPECT_EQ(inCore, ret); |
| 188 #endif | 189 #endif |
| 189 } | 190 } |
| 190 | 191 |
| 191 bool IsLargeMemoryDevice() { | 192 bool IsLargeMemoryDevice() { |
| 192 return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024; | 193 return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024; |
| 193 } | 194 } |
| 194 | 195 |
| 195 class MockPartitionStatsDumper : public PartitionStatsDumper { | 196 class MockPartitionStatsDumper : public PartitionStatsDumper { |
| 196 public: | 197 public: |
| 197 MockPartitionStatsDumper() | 198 MockPartitionStatsDumper() |
| 198 : m_totalResidentBytes(0), | 199 : total_resident_bytes(0), |
| 199 m_totalActiveBytes(0), | 200 total_active_bytes(0), |
| 200 m_totalDecommittableBytes(0), | 201 total_decommittable_bytes(0), |
| 201 m_totalDiscardableBytes(0) {} | 202 total_discardable_bytes(0) {} |
| 202 | 203 |
| 203 void partitionDumpTotals(const char* partitionName, | 204 void PartitionDumpTotals(const char* partition_name, |
| 204 const PartitionMemoryStats* memoryStats) override { | 205 const PartitionMemoryStats* stats) override { |
| 205 EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes); | 206 EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes); |
| 206 EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes); | 207 EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes); |
| 207 EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes); | 208 EXPECT_EQ(total_active_bytes, stats->total_active_bytes); |
| 208 EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes); | 209 EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes); |
| 209 EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes); | 210 EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes); |
| 210 } | 211 } |
| 211 | 212 |
| 212 void partitionsDumpBucketStats( | 213 void PartitionsDumpBucketStats( |
| 213 const char* partitionName, | 214 const char* partition_name, |
| 214 const PartitionBucketMemoryStats* memoryStats) override { | 215 const PartitionBucketMemoryStats* stats) override { |
| 215 (void)partitionName; | 216 (void)partition_name; |
| 216 EXPECT_TRUE(memoryStats->isValid); | 217 EXPECT_TRUE(stats->is_valid); |
| 217 EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask); | 218 EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask); |
| 218 m_bucketStats.push_back(*memoryStats); | 219 bucket_stats.push_back(*stats); |
| 219 m_totalResidentBytes += memoryStats->residentBytes; | 220 total_resident_bytes += stats->resident_bytes; |
| 220 m_totalActiveBytes += memoryStats->activeBytes; | 221 total_active_bytes += stats->active_bytes; |
| 221 m_totalDecommittableBytes += memoryStats->decommittableBytes; | 222 total_decommittable_bytes += stats->decommittable_bytes; |
| 222 m_totalDiscardableBytes += memoryStats->discardableBytes; | 223 total_discardable_bytes += stats->discardable_bytes; |
| 223 } | 224 } |
| 224 | 225 |
| 225 bool IsMemoryAllocationRecorded() { | 226 bool IsMemoryAllocationRecorded() { |
| 226 return m_totalResidentBytes != 0 && m_totalActiveBytes != 0; | 227 return total_resident_bytes != 0 && total_active_bytes != 0; |
| 227 } | 228 } |
| 228 | 229 |
| 229 const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize) { | 230 const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) { |
| 230 for (size_t i = 0; i < m_bucketStats.size(); ++i) { | 231 for (size_t i = 0; i < bucket_stats.size(); ++i) { |
| 231 if (m_bucketStats[i].bucketSlotSize == bucketSize) | 232 if (bucket_stats[i].bucket_slot_size == bucket_size) |
| 232 return &m_bucketStats[i]; | 233 return &bucket_stats[i]; |
| 233 } | 234 } |
| 234 return 0; | 235 return 0; |
| 235 } | 236 } |
| 236 | 237 |
| 237 private: | 238 private: |
| 238 size_t m_totalResidentBytes; | 239 size_t total_resident_bytes; |
| 239 size_t m_totalActiveBytes; | 240 size_t total_active_bytes; |
| 240 size_t m_totalDecommittableBytes; | 241 size_t total_decommittable_bytes; |
| 241 size_t m_totalDiscardableBytes; | 242 size_t total_discardable_bytes; |
| 242 | 243 |
| 243 std::vector<PartitionBucketMemoryStats> m_bucketStats; | 244 std::vector<PartitionBucketMemoryStats> bucket_stats; |
| 244 }; | 245 }; |
| 245 | 246 |
| 246 } // anonymous namespace | 247 } // anonymous namespace |
| 247 | 248 |
| 248 // Check that the most basic of allocate / free pairs work. | 249 // Check that the most basic of allocate / free pairs work. |
| 249 TEST(PartitionAllocTest, Basic) { | 250 TEST(PartitionAllocTest, Basic) { |
| 250 TestSetup(); | 251 TestSetup(); |
| 251 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 252 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 252 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; | 253 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; |
| 253 | 254 |
| 254 EXPECT_FALSE(bucket->emptyPagesHead); | 255 EXPECT_FALSE(bucket->empty_pages_head); |
| 255 EXPECT_FALSE(bucket->decommittedPagesHead); | 256 EXPECT_FALSE(bucket->decommitted_pages_head); |
| 256 EXPECT_EQ(seedPage, bucket->activePagesHead); | 257 EXPECT_EQ(seedPage, bucket->active_pages_head); |
| 257 EXPECT_EQ(0, bucket->activePagesHead->nextPage); | 258 EXPECT_EQ(0, bucket->active_pages_head->next_page); |
| 258 | 259 |
| 259 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 260 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 260 EXPECT_TRUE(ptr); | 261 EXPECT_TRUE(ptr); |
| 261 EXPECT_EQ(kPointerOffset, | 262 EXPECT_EQ(kPointerOffset, |
| 262 reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask); | 263 reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask); |
| 263 // Check that the offset appears to include a guard page. | 264 // Check that the offset appears to include a guard page. |
| 264 EXPECT_EQ(kPartitionPageSize + kPointerOffset, | 265 EXPECT_EQ(kPartitionPageSize + kPointerOffset, |
| 265 reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); | 266 reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); |
| 266 | 267 |
| 267 partitionFree(ptr); | 268 PartitionFree(ptr); |
| 268 // Expect that the last active page gets noticed as empty but doesn't get | 269 // Expect that the last active page gets noticed as empty but doesn't get |
| 269 // decommitted. | 270 // decommitted. |
| 270 EXPECT_TRUE(bucket->emptyPagesHead); | 271 EXPECT_TRUE(bucket->empty_pages_head); |
| 271 EXPECT_FALSE(bucket->decommittedPagesHead); | 272 EXPECT_FALSE(bucket->decommitted_pages_head); |
| 272 | 273 |
| 273 TestShutdown(); | 274 TestShutdown(); |
| 274 } | 275 } |
| 275 | 276 |
| 276 // Check that we can detect a memory leak. | 277 // Check that we can detect a memory leak. |
| 277 TEST(PartitionAllocTest, SimpleLeak) { | 278 TEST(PartitionAllocTest, SimpleLeak) { |
| 278 TestSetup(); | 279 TestSetup(); |
| 279 void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 280 void* leakedPtr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 280 (void)leakedPtr; | 281 (void)leakedPtr; |
| 281 void* leakedPtr2 = | 282 void* leakedPtr2 = PartitionAllocGeneric(generic_allocator.root(), |
| 282 partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); | 283 kTestAllocSize, type_name); |
| 283 (void)leakedPtr2; | 284 (void)leakedPtr2; |
| 284 EXPECT_FALSE(allocator.shutdown()); | 285 EXPECT_FALSE(allocator.shutdown()); |
| 285 EXPECT_FALSE(genericAllocator.shutdown()); | 286 EXPECT_FALSE(generic_allocator.shutdown()); |
| 286 } | 287 } |
| 287 | 288 |
| 288 // Test multiple allocations, and freelist handling. | 289 // Test multiple allocations, and freelist handling. |
| 289 TEST(PartitionAllocTest, MultiAlloc) { | 290 TEST(PartitionAllocTest, MultiAlloc) { |
| 290 TestSetup(); | 291 TestSetup(); |
| 291 | 292 |
| 292 char* ptr1 = reinterpret_cast<char*>( | 293 char* ptr1 = reinterpret_cast<char*>( |
| 293 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 294 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 294 char* ptr2 = reinterpret_cast<char*>( | 295 char* ptr2 = reinterpret_cast<char*>( |
| 295 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 296 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 296 EXPECT_TRUE(ptr1); | 297 EXPECT_TRUE(ptr1); |
| 297 EXPECT_TRUE(ptr2); | 298 EXPECT_TRUE(ptr2); |
| 298 ptrdiff_t diff = ptr2 - ptr1; | 299 ptrdiff_t diff = ptr2 - ptr1; |
| 299 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); | 300 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); |
| 300 | 301 |
| 301 // Check that we re-use the just-freed slot. | 302 // Check that we re-use the just-freed slot. |
| 302 partitionFree(ptr2); | 303 PartitionFree(ptr2); |
| 303 ptr2 = reinterpret_cast<char*>( | 304 ptr2 = reinterpret_cast<char*>( |
| 304 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 305 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 305 EXPECT_TRUE(ptr2); | 306 EXPECT_TRUE(ptr2); |
| 306 diff = ptr2 - ptr1; | 307 diff = ptr2 - ptr1; |
| 307 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); | 308 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); |
| 308 partitionFree(ptr1); | 309 PartitionFree(ptr1); |
| 309 ptr1 = reinterpret_cast<char*>( | 310 ptr1 = reinterpret_cast<char*>( |
| 310 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 311 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 311 EXPECT_TRUE(ptr1); | 312 EXPECT_TRUE(ptr1); |
| 312 diff = ptr2 - ptr1; | 313 diff = ptr2 - ptr1; |
| 313 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); | 314 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); |
| 314 | 315 |
| 315 char* ptr3 = reinterpret_cast<char*>( | 316 char* ptr3 = reinterpret_cast<char*>( |
| 316 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 317 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 317 EXPECT_TRUE(ptr3); | 318 EXPECT_TRUE(ptr3); |
| 318 diff = ptr3 - ptr1; | 319 diff = ptr3 - ptr1; |
| 319 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); | 320 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); |
| 320 | 321 |
| 321 partitionFree(ptr1); | 322 PartitionFree(ptr1); |
| 322 partitionFree(ptr2); | 323 PartitionFree(ptr2); |
| 323 partitionFree(ptr3); | 324 PartitionFree(ptr3); |
| 324 | 325 |
| 325 TestShutdown(); | 326 TestShutdown(); |
| 326 } | 327 } |
| 327 | 328 |
| 328 // Test a bucket with multiple pages. | 329 // Test a bucket with multiple pages. |
| 329 TEST(PartitionAllocTest, MultiPages) { | 330 TEST(PartitionAllocTest, MultiPages) { |
| 330 TestSetup(); | 331 TestSetup(); |
| 331 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 332 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 332 | 333 |
| 333 PartitionPage* page = GetFullPage(kTestAllocSize); | 334 PartitionPage* page = GetFullPage(kTestAllocSize); |
| 334 FreeFullPage(page); | 335 FreeFullPage(page); |
| 335 EXPECT_TRUE(bucket->emptyPagesHead); | 336 EXPECT_TRUE(bucket->empty_pages_head); |
| 336 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 337 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 337 EXPECT_EQ(0, page->nextPage); | 338 EXPECT_EQ(0, page->next_page); |
| 338 EXPECT_EQ(0, page->numAllocatedSlots); | 339 EXPECT_EQ(0, page->num_allocated_slots); |
| 339 | 340 |
| 340 page = GetFullPage(kTestAllocSize); | 341 page = GetFullPage(kTestAllocSize); |
| 341 PartitionPage* page2 = GetFullPage(kTestAllocSize); | 342 PartitionPage* page2 = GetFullPage(kTestAllocSize); |
| 342 | 343 |
| 343 EXPECT_EQ(page2, bucket->activePagesHead); | 344 EXPECT_EQ(page2, bucket->active_pages_head); |
| 344 EXPECT_EQ(0, page2->nextPage); | 345 EXPECT_EQ(0, page2->next_page); |
| 345 EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & | 346 EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPageToPointer(page)) & |
| 346 kSuperPageBaseMask, | 347 kSuperPageBaseMask, |
| 347 reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & | 348 reinterpret_cast<uintptr_t>(PartitionPageToPointer(page2)) & |
| 348 kSuperPageBaseMask); | 349 kSuperPageBaseMask); |
| 349 | 350 |
| 350 // Fully free the non-current page. This will leave us with no current | 351 // Fully free the non-current page. This will leave us with no current |
| 351 // active page because one is empty and the other is full. | 352 // active page because one is empty and the other is full. |
| 352 FreeFullPage(page); | 353 FreeFullPage(page); |
| 353 EXPECT_EQ(0, page->numAllocatedSlots); | 354 EXPECT_EQ(0, page->num_allocated_slots); |
| 354 EXPECT_TRUE(bucket->emptyPagesHead); | 355 EXPECT_TRUE(bucket->empty_pages_head); |
| 355 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 356 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 356 | 357 |
| 357 // Allocate a new page, it should pull from the freelist. | 358 // Allocate a new page, it should pull from the freelist. |
| 358 page = GetFullPage(kTestAllocSize); | 359 page = GetFullPage(kTestAllocSize); |
| 359 EXPECT_FALSE(bucket->emptyPagesHead); | 360 EXPECT_FALSE(bucket->empty_pages_head); |
| 360 EXPECT_EQ(page, bucket->activePagesHead); | 361 EXPECT_EQ(page, bucket->active_pages_head); |
| 361 | 362 |
| 362 FreeFullPage(page); | 363 FreeFullPage(page); |
| 363 FreeFullPage(page2); | 364 FreeFullPage(page2); |
| 364 EXPECT_EQ(0, page->numAllocatedSlots); | 365 EXPECT_EQ(0, page->num_allocated_slots); |
| 365 EXPECT_EQ(0, page2->numAllocatedSlots); | 366 EXPECT_EQ(0, page2->num_allocated_slots); |
| 366 EXPECT_EQ(0, page2->numUnprovisionedSlots); | 367 EXPECT_EQ(0, page2->num_unprovisioned_slots); |
| 367 EXPECT_NE(-1, page2->emptyCacheIndex); | 368 EXPECT_NE(-1, page2->empty_cache_index); |
| 368 | 369 |
| 369 TestShutdown(); | 370 TestShutdown(); |
| 370 } | 371 } |
| 371 | 372 |
| 372 // Test some finer aspects of internal page transitions. | 373 // Test some finer aspects of internal page transitions. |
| 373 TEST(PartitionAllocTest, PageTransitions) { | 374 TEST(PartitionAllocTest, PageTransitions) { |
| 374 TestSetup(); | 375 TestSetup(); |
| 375 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 376 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 376 | 377 |
| 377 PartitionPage* page1 = GetFullPage(kTestAllocSize); | 378 PartitionPage* page1 = GetFullPage(kTestAllocSize); |
| 378 EXPECT_EQ(page1, bucket->activePagesHead); | 379 EXPECT_EQ(page1, bucket->active_pages_head); |
| 379 EXPECT_EQ(0, page1->nextPage); | 380 EXPECT_EQ(0, page1->next_page); |
| 380 PartitionPage* page2 = GetFullPage(kTestAllocSize); | 381 PartitionPage* page2 = GetFullPage(kTestAllocSize); |
| 381 EXPECT_EQ(page2, bucket->activePagesHead); | 382 EXPECT_EQ(page2, bucket->active_pages_head); |
| 382 EXPECT_EQ(0, page2->nextPage); | 383 EXPECT_EQ(0, page2->next_page); |
| 383 | 384 |
| 384 // Bounce page1 back into the non-full list then fill it up again. | 385 // Bounce page1 back into the non-full list then fill it up again. |
| 385 char* ptr = | 386 char* ptr = |
| 386 reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; | 387 reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; |
| 387 partitionFree(ptr); | 388 PartitionFree(ptr); |
| 388 EXPECT_EQ(page1, bucket->activePagesHead); | 389 EXPECT_EQ(page1, bucket->active_pages_head); |
| 389 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 390 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 390 EXPECT_EQ(page1, bucket->activePagesHead); | 391 EXPECT_EQ(page1, bucket->active_pages_head); |
| 391 EXPECT_EQ(page2, bucket->activePagesHead->nextPage); | 392 EXPECT_EQ(page2, bucket->active_pages_head->next_page); |
| 392 | 393 |
| 393 // Allocating another page at this point should cause us to scan over page1 | 394 // Allocating another page at this point should cause us to scan over page1 |
| 394 // (which is both full and NOT our current page), and evict it from the | 395 // (which is both full and NOT our current page), and evict it from the |
| 395 // freelist. Older code had a O(n^2) condition due to failure to do this. | 396 // freelist. Older code had a O(n^2) condition due to failure to do this. |
| 396 PartitionPage* page3 = GetFullPage(kTestAllocSize); | 397 PartitionPage* page3 = GetFullPage(kTestAllocSize); |
| 397 EXPECT_EQ(page3, bucket->activePagesHead); | 398 EXPECT_EQ(page3, bucket->active_pages_head); |
| 398 EXPECT_EQ(0, page3->nextPage); | 399 EXPECT_EQ(0, page3->next_page); |
| 399 | 400 |
| 400 // Work out a pointer into page2 and free it. | 401 // Work out a pointer into page2 and free it. |
| 401 ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset; | 402 ptr = reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset; |
| 402 partitionFree(ptr); | 403 PartitionFree(ptr); |
| 403 // Trying to allocate at this time should cause us to cycle around to page2 | 404 // Trying to allocate at this time should cause us to cycle around to page2 |
| 404 // and find the recently freed slot. | 405 // and find the recently freed slot. |
| 405 char* newPtr = reinterpret_cast<char*>( | 406 char* newPtr = reinterpret_cast<char*>( |
| 406 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 407 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 407 EXPECT_EQ(ptr, newPtr); | 408 EXPECT_EQ(ptr, newPtr); |
| 408 EXPECT_EQ(page2, bucket->activePagesHead); | 409 EXPECT_EQ(page2, bucket->active_pages_head); |
| 409 EXPECT_EQ(page3, page2->nextPage); | 410 EXPECT_EQ(page3, page2->next_page); |
| 410 | 411 |
| 411 // Work out a pointer into page1 and free it. This should pull the page | 412 // Work out a pointer into page1 and free it. This should pull the page |
| 412 // back into the list of available pages. | 413 // back into the list of available pages. |
| 413 ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; | 414 ptr = reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; |
| 414 partitionFree(ptr); | 415 PartitionFree(ptr); |
| 415 // This allocation should be satisfied by page1. | 416 // This allocation should be satisfied by page1. |
| 416 newPtr = reinterpret_cast<char*>( | 417 newPtr = reinterpret_cast<char*>( |
| 417 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 418 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 418 EXPECT_EQ(ptr, newPtr); | 419 EXPECT_EQ(ptr, newPtr); |
| 419 EXPECT_EQ(page1, bucket->activePagesHead); | 420 EXPECT_EQ(page1, bucket->active_pages_head); |
| 420 EXPECT_EQ(page2, page1->nextPage); | 421 EXPECT_EQ(page2, page1->next_page); |
| 421 | 422 |
| 422 FreeFullPage(page3); | 423 FreeFullPage(page3); |
| 423 FreeFullPage(page2); | 424 FreeFullPage(page2); |
| 424 FreeFullPage(page1); | 425 FreeFullPage(page1); |
| 425 | 426 |
| 426 // Allocating whilst in this state exposed a bug, so keep the test. | 427 // Allocating whilst in this state exposed a bug, so keep the test. |
| 427 ptr = reinterpret_cast<char*>( | 428 ptr = reinterpret_cast<char*>( |
| 428 partitionAlloc(allocator.root(), kTestAllocSize, typeName)); | 429 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 429 partitionFree(ptr); | 430 PartitionFree(ptr); |
| 430 | 431 |
| 431 TestShutdown(); | 432 TestShutdown(); |
| 432 } | 433 } |
| 433 | 434 |
| 434 // Test some corner cases relating to page transitions in the internal | 435 // Test some corner cases relating to page transitions in the internal |
| 435 // free page list metadata bucket. | 436 // free page list metadata bucket. |
| 436 TEST(PartitionAllocTest, FreePageListPageTransitions) { | 437 TEST(PartitionAllocTest, FreePageListPageTransitions) { |
| 437 TestSetup(); | 438 TestSetup(); |
| 438 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 439 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 439 | 440 |
| 440 size_t numToFillFreeListPage = | 441 size_t numToFillFreeListPage = |
| 441 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); | 442 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); |
| 442 // The +1 is because we need to account for the fact that the current page | 443 // The +1 is because we need to account for the fact that the current page |
| 443 // never gets thrown on the freelist. | 444 // never gets thrown on the freelist. |
| 444 ++numToFillFreeListPage; | 445 ++numToFillFreeListPage; |
| 445 std::unique_ptr<PartitionPage* []> pages = | 446 std::unique_ptr<PartitionPage* []> pages = |
| 446 WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]); | 447 WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]); |
| 447 | 448 |
| 448 size_t i; | 449 size_t i; |
| 449 for (i = 0; i < numToFillFreeListPage; ++i) { | 450 for (i = 0; i < numToFillFreeListPage; ++i) { |
| 450 pages[i] = GetFullPage(kTestAllocSize); | 451 pages[i] = GetFullPage(kTestAllocSize); |
| 451 } | 452 } |
| 452 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); | 453 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); |
| 453 for (i = 0; i < numToFillFreeListPage; ++i) | 454 for (i = 0; i < numToFillFreeListPage; ++i) |
| 454 FreeFullPage(pages[i]); | 455 FreeFullPage(pages[i]); |
| 455 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 456 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 456 EXPECT_TRUE(bucket->emptyPagesHead); | 457 EXPECT_TRUE(bucket->empty_pages_head); |
| 457 | 458 |
| 458 // Allocate / free in a different bucket size so we get control of a | 459 // Allocate / free in a different bucket size so we get control of a |
| 459 // different free page list. We need two pages because one will be the last | 460 // different free page list. We need two pages because one will be the last |
| 460 // active page and not get freed. | 461 // active page and not get freed. |
| 461 PartitionPage* page1 = GetFullPage(kTestAllocSize * 2); | 462 PartitionPage* page1 = GetFullPage(kTestAllocSize * 2); |
| 462 PartitionPage* page2 = GetFullPage(kTestAllocSize * 2); | 463 PartitionPage* page2 = GetFullPage(kTestAllocSize * 2); |
| 463 FreeFullPage(page1); | 464 FreeFullPage(page1); |
| 464 FreeFullPage(page2); | 465 FreeFullPage(page2); |
| 465 | 466 |
| 466 for (i = 0; i < numToFillFreeListPage; ++i) { | 467 for (i = 0; i < numToFillFreeListPage; ++i) { |
| 467 pages[i] = GetFullPage(kTestAllocSize); | 468 pages[i] = GetFullPage(kTestAllocSize); |
| 468 } | 469 } |
| 469 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); | 470 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); |
| 470 | 471 |
| 471 for (i = 0; i < numToFillFreeListPage; ++i) | 472 for (i = 0; i < numToFillFreeListPage; ++i) |
| 472 FreeFullPage(pages[i]); | 473 FreeFullPage(pages[i]); |
| 473 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 474 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 474 EXPECT_TRUE(bucket->emptyPagesHead); | 475 EXPECT_TRUE(bucket->empty_pages_head); |
| 475 | 476 |
| 476 TestShutdown(); | 477 TestShutdown(); |
| 477 } | 478 } |
| 478 | 479 |
| 479 // Test a large series of allocations that cross more than one underlying | 480 // Test a large series of allocations that cross more than one underlying |
| 480 // 64KB super page allocation. | 481 // 64KB super page allocation. |
| 481 TEST(PartitionAllocTest, MultiPageAllocs) { | 482 TEST(PartitionAllocTest, MultiPageAllocs) { |
| 482 TestSetup(); | 483 TestSetup(); |
| 483 // This is guaranteed to cross a super page boundary because the first | 484 // This is guaranteed to cross a super page boundary because the first |
| 484 // partition page "slot" will be taken up by a guard page. | 485 // partition page "slot" will be taken up by a guard page. |
| 485 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage; | 486 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage; |
| 486 // The super page should begin and end in a guard so we use one less page in | 487 // The super page should begin and end in a guard so we use one less page in |
| 487 // order to allocate a single page in the new super page. | 488 // order to allocate a single page in the new super page. |
| 488 --numPagesNeeded; | 489 --numPagesNeeded; |
| 489 | 490 |
| 490 EXPECT_GT(numPagesNeeded, 1u); | 491 EXPECT_GT(numPagesNeeded, 1u); |
| 491 std::unique_ptr<PartitionPage* []> pages; | 492 std::unique_ptr<PartitionPage* []> pages; |
| 492 pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]); | 493 pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]); |
| 493 uintptr_t firstSuperPageBase = 0; | 494 uintptr_t firstSuperPageBase = 0; |
| 494 size_t i; | 495 size_t i; |
| 495 for (i = 0; i < numPagesNeeded; ++i) { | 496 for (i = 0; i < numPagesNeeded; ++i) { |
| 496 pages[i] = GetFullPage(kTestAllocSize); | 497 pages[i] = GetFullPage(kTestAllocSize); |
| 497 void* storagePtr = partitionPageToPointer(pages[i]); | 498 void* storagePtr = PartitionPageToPointer(pages[i]); |
| 498 if (!i) | 499 if (!i) |
| 499 firstSuperPageBase = | 500 firstSuperPageBase = |
| 500 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; | 501 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; |
| 501 if (i == numPagesNeeded - 1) { | 502 if (i == numPagesNeeded - 1) { |
| 502 uintptr_t secondSuperPageBase = | 503 uintptr_t secondSuperPageBase = |
| 503 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; | 504 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; |
| 504 uintptr_t secondSuperPageOffset = | 505 uintptr_t secondSuperPageOffset = |
| 505 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask; | 506 reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask; |
| 506 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase); | 507 EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase); |
| 507 // Check that we allocated a guard page for the second page. | 508 // Check that we allocated a guard page for the second page. |
| 508 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset); | 509 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset); |
| 509 } | 510 } |
| 510 } | 511 } |
| 511 for (i = 0; i < numPagesNeeded; ++i) | 512 for (i = 0; i < numPagesNeeded; ++i) |
| 512 FreeFullPage(pages[i]); | 513 FreeFullPage(pages[i]); |
| 513 | 514 |
| 514 TestShutdown(); | 515 TestShutdown(); |
| 515 } | 516 } |
| 516 | 517 |
| 517 // Test the generic allocation functions that can handle arbitrary sizes and | 518 // Test the generic allocation functions that can handle arbitrary sizes and |
| 518 // reallocing etc. | 519 // reallocing etc. |
| 519 TEST(PartitionAllocTest, GenericAlloc) { | 520 TEST(PartitionAllocTest, GenericAlloc) { |
| 520 TestSetup(); | 521 TestSetup(); |
| 521 | 522 |
| 522 void* ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); | 523 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); |
| 523 EXPECT_TRUE(ptr); | 524 EXPECT_TRUE(ptr); |
| 524 partitionFreeGeneric(genericAllocator.root(), ptr); | 525 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 525 ptr = partitionAllocGeneric(genericAllocator.root(), kGenericMaxBucketed + 1, | 526 ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1, |
| 526 typeName); | 527 type_name); |
| 527 EXPECT_TRUE(ptr); | 528 EXPECT_TRUE(ptr); |
| 528 partitionFreeGeneric(genericAllocator.root(), ptr); | 529 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 529 | 530 |
| 530 ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); | 531 ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); |
| 531 EXPECT_TRUE(ptr); | 532 EXPECT_TRUE(ptr); |
| 532 void* origPtr = ptr; | 533 void* origPtr = ptr; |
| 533 char* charPtr = static_cast<char*>(ptr); | 534 char* charPtr = static_cast<char*>(ptr); |
| 534 *charPtr = 'A'; | 535 *charPtr = 'A'; |
| 535 | 536 |
| 536 // Change the size of the realloc, remaining inside the same bucket. | 537 // Change the size of the realloc, remaining inside the same bucket. |
| 537 void* newPtr = | 538 void* newPtr = |
| 538 partitionReallocGeneric(genericAllocator.root(), ptr, 2, typeName); | 539 PartitionReallocGeneric(generic_allocator.root(), ptr, 2, type_name); |
| 539 EXPECT_EQ(ptr, newPtr); | 540 EXPECT_EQ(ptr, newPtr); |
| 540 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); | 541 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); |
| 541 EXPECT_EQ(ptr, newPtr); | 542 EXPECT_EQ(ptr, newPtr); |
| 542 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 543 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 543 kGenericSmallestBucket, typeName); | 544 kGenericSmallestBucket, type_name); |
| 544 EXPECT_EQ(ptr, newPtr); | 545 EXPECT_EQ(ptr, newPtr); |
| 545 | 546 |
| 546 // Change the size of the realloc, switching buckets. | 547 // Change the size of the realloc, switching buckets. |
| 547 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 548 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 548 kGenericSmallestBucket + 1, typeName); | 549 kGenericSmallestBucket + 1, type_name); |
| 549 EXPECT_NE(newPtr, ptr); | 550 EXPECT_NE(newPtr, ptr); |
| 550 // Check that the realloc copied correctly. | 551 // Check that the realloc copied correctly. |
| 551 char* newCharPtr = static_cast<char*>(newPtr); | 552 char* newCharPtr = static_cast<char*>(newPtr); |
| 552 EXPECT_EQ(*newCharPtr, 'A'); | 553 EXPECT_EQ(*newCharPtr, 'A'); |
| 553 #if DCHECK_IS_ON() | 554 #if DCHECK_IS_ON() |
| 554 // Subtle: this checks for an old bug where we copied too much from the | 555 // Subtle: this checks for an old bug where we copied too much from the |
| 555 // source of the realloc. The condition can be detected by a trashing of | 556 // source of the realloc. The condition can be detected by a trashing of |
| 556 // the uninitialized value in the space of the upsized allocation. | 557 // the uninitialized value in the space of the upsized allocation. |
| 557 EXPECT_EQ(kUninitializedByte, | 558 EXPECT_EQ(kUninitializedByte, |
| 558 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket))); | 559 static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket))); |
| 559 #endif | 560 #endif |
| 560 *newCharPtr = 'B'; | 561 *newCharPtr = 'B'; |
| 561 // The realloc moved. To check that the old allocation was freed, we can | 562 // The realloc moved. To check that the old allocation was freed, we can |
| 562 // do an alloc of the old allocation size and check that the old allocation | 563 // do an alloc of the old allocation size and check that the old allocation |
| 563 // address is at the head of the freelist and reused. | 564 // address is at the head of the freelist and reused. |
| 564 void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); | 565 void* reusedPtr = |
| 566 PartitionAllocGeneric(generic_allocator.root(), 1, type_name); |
| 565 EXPECT_EQ(reusedPtr, origPtr); | 567 EXPECT_EQ(reusedPtr, origPtr); |
| 566 partitionFreeGeneric(genericAllocator.root(), reusedPtr); | 568 PartitionFreeGeneric(generic_allocator.root(), reusedPtr); |
| 567 | 569 |
| 568 // Downsize the realloc. | 570 // Downsize the realloc. |
| 569 ptr = newPtr; | 571 ptr = newPtr; |
| 570 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); | 572 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); |
| 571 EXPECT_EQ(newPtr, origPtr); | 573 EXPECT_EQ(newPtr, origPtr); |
| 572 newCharPtr = static_cast<char*>(newPtr); | 574 newCharPtr = static_cast<char*>(newPtr); |
| 573 EXPECT_EQ(*newCharPtr, 'B'); | 575 EXPECT_EQ(*newCharPtr, 'B'); |
| 574 *newCharPtr = 'C'; | 576 *newCharPtr = 'C'; |
| 575 | 577 |
| 576 // Upsize the realloc to outside the partition. | 578 // Upsize the realloc to outside the partition. |
| 577 ptr = newPtr; | 579 ptr = newPtr; |
| 578 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 580 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 579 kGenericMaxBucketed + 1, typeName); | 581 kGenericMaxBucketed + 1, type_name); |
| 580 EXPECT_NE(newPtr, ptr); | 582 EXPECT_NE(newPtr, ptr); |
| 581 newCharPtr = static_cast<char*>(newPtr); | 583 newCharPtr = static_cast<char*>(newPtr); |
| 582 EXPECT_EQ(*newCharPtr, 'C'); | 584 EXPECT_EQ(*newCharPtr, 'C'); |
| 583 *newCharPtr = 'D'; | 585 *newCharPtr = 'D'; |
| 584 | 586 |
| 585 // Upsize and downsize the realloc, remaining outside the partition. | 587 // Upsize and downsize the realloc, remaining outside the partition. |
| 586 ptr = newPtr; | 588 ptr = newPtr; |
| 587 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 589 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 588 kGenericMaxBucketed * 10, typeName); | 590 kGenericMaxBucketed * 10, type_name); |
| 589 newCharPtr = static_cast<char*>(newPtr); | 591 newCharPtr = static_cast<char*>(newPtr); |
| 590 EXPECT_EQ(*newCharPtr, 'D'); | 592 EXPECT_EQ(*newCharPtr, 'D'); |
| 591 *newCharPtr = 'E'; | 593 *newCharPtr = 'E'; |
| 592 ptr = newPtr; | 594 ptr = newPtr; |
| 593 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, | 595 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 594 kGenericMaxBucketed * 2, typeName); | 596 kGenericMaxBucketed * 2, type_name); |
| 595 newCharPtr = static_cast<char*>(newPtr); | 597 newCharPtr = static_cast<char*>(newPtr); |
| 596 EXPECT_EQ(*newCharPtr, 'E'); | 598 EXPECT_EQ(*newCharPtr, 'E'); |
| 597 *newCharPtr = 'F'; | 599 *newCharPtr = 'F'; |
| 598 | 600 |
| 599 // Downsize the realloc to inside the partition. | 601 // Downsize the realloc to inside the partition. |
| 600 ptr = newPtr; | 602 ptr = newPtr; |
| 601 newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); | 603 newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); |
| 602 EXPECT_NE(newPtr, ptr); | 604 EXPECT_NE(newPtr, ptr); |
| 603 EXPECT_EQ(newPtr, origPtr); | 605 EXPECT_EQ(newPtr, origPtr); |
| 604 newCharPtr = static_cast<char*>(newPtr); | 606 newCharPtr = static_cast<char*>(newPtr); |
| 605 EXPECT_EQ(*newCharPtr, 'F'); | 607 EXPECT_EQ(*newCharPtr, 'F'); |
| 606 | 608 |
| 607 partitionFreeGeneric(genericAllocator.root(), newPtr); | 609 PartitionFreeGeneric(generic_allocator.root(), newPtr); |
| 608 TestShutdown(); | 610 TestShutdown(); |
| 609 } | 611 } |
| 610 | 612 |
| 611 // Test the generic allocation functions can handle some specific sizes of | 613 // Test the generic allocation functions can handle some specific sizes of |
| 612 // interest. | 614 // interest. |
| 613 TEST(PartitionAllocTest, GenericAllocSizes) { | 615 TEST(PartitionAllocTest, GenericAllocSizes) { |
| 614 TestSetup(); | 616 TestSetup(); |
| 615 | 617 |
| 616 void* ptr = partitionAllocGeneric(genericAllocator.root(), 0, typeName); | 618 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name); |
| 617 EXPECT_TRUE(ptr); | 619 EXPECT_TRUE(ptr); |
| 618 partitionFreeGeneric(genericAllocator.root(), ptr); | 620 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 619 | 621 |
| 620 // kPartitionPageSize is interesting because it results in just one | 622 // kPartitionPageSize is interesting because it results in just one |
| 621 // allocation per page, which tripped up some corner cases. | 623 // allocation per page, which tripped up some corner cases. |
| 622 size_t size = kPartitionPageSize - kExtraAllocSize; | 624 size_t size = kPartitionPageSize - kExtraAllocSize; |
| 623 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 625 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 624 EXPECT_TRUE(ptr); | 626 EXPECT_TRUE(ptr); |
| 625 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 627 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 626 EXPECT_TRUE(ptr2); | 628 EXPECT_TRUE(ptr2); |
| 627 partitionFreeGeneric(genericAllocator.root(), ptr); | 629 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 628 // Should be freeable at this point. | 630 // Should be freeable at this point. |
| 629 PartitionPage* page = | 631 PartitionPage* page = |
| 630 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 632 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 631 EXPECT_NE(-1, page->emptyCacheIndex); | 633 EXPECT_NE(-1, page->empty_cache_index); |
| 632 partitionFreeGeneric(genericAllocator.root(), ptr2); | 634 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 633 | 635 |
| 634 size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - | 636 size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - |
| 635 kSystemPageSize) / | 637 kSystemPageSize) / |
| 636 2) - | 638 2) - |
| 637 kExtraAllocSize; | 639 kExtraAllocSize; |
| 638 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 640 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 639 EXPECT_TRUE(ptr); | 641 EXPECT_TRUE(ptr); |
| 640 memset(ptr, 'A', size); | 642 memset(ptr, 'A', size); |
| 641 ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 643 ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 642 EXPECT_TRUE(ptr2); | 644 EXPECT_TRUE(ptr2); |
| 643 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 645 void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 644 EXPECT_TRUE(ptr3); | 646 EXPECT_TRUE(ptr3); |
| 645 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 647 void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 646 EXPECT_TRUE(ptr4); | 648 EXPECT_TRUE(ptr4); |
| 647 | 649 |
| 648 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 650 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 649 PartitionPage* page2 = | 651 PartitionPage* page2 = |
| 650 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3)); | 652 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3)); |
| 651 EXPECT_NE(page, page2); | 653 EXPECT_NE(page, page2); |
| 652 | 654 |
| 653 partitionFreeGeneric(genericAllocator.root(), ptr); | 655 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 654 partitionFreeGeneric(genericAllocator.root(), ptr3); | 656 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 655 partitionFreeGeneric(genericAllocator.root(), ptr2); | 657 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 656 // Should be freeable at this point. | 658 // Should be freeable at this point. |
| 657 EXPECT_NE(-1, page->emptyCacheIndex); | 659 EXPECT_NE(-1, page->empty_cache_index); |
| 658 EXPECT_EQ(0, page->numAllocatedSlots); | 660 EXPECT_EQ(0, page->num_allocated_slots); |
| 659 EXPECT_EQ(0, page->numUnprovisionedSlots); | 661 EXPECT_EQ(0, page->num_unprovisioned_slots); |
| 660 void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 662 void* newPtr = |
| 663 PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 661 EXPECT_EQ(ptr3, newPtr); | 664 EXPECT_EQ(ptr3, newPtr); |
| 662 newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 665 newPtr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 663 EXPECT_EQ(ptr2, newPtr); | 666 EXPECT_EQ(ptr2, newPtr); |
| 664 #if defined(OS_LINUX) && !DCHECK_IS_ON() | 667 #if defined(OS_LINUX) && !DCHECK_IS_ON() |
| 665 // On Linux, we have a guarantee that freelisting a page should cause its | 668 // On Linux, we have a guarantee that freelisting a page should cause its |
| 666 // contents to be nulled out. We check for null here to detect a bug we | 669 // contents to be nulled out. We check for null here to detect a bug we |
| 667 // had where a large slot size was causing us to not properly free all | 670 // had where a large slot size was causing us to not properly free all |
| 668 // resources back to the system. | 671 // resources back to the system. |
| 669 // We only run the check when asserts are disabled because when they are | 672 // We only run the check when asserts are disabled because when they are |
| 670 // enabled, the allocated area is overwritten with an "uninitialized" | 673 // enabled, the allocated area is overwritten with an "uninitialized" |
| 671 // byte pattern. | 674 // byte pattern. |
| 672 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); | 675 EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); |
| 673 #endif | 676 #endif |
| 674 partitionFreeGeneric(genericAllocator.root(), newPtr); | 677 PartitionFreeGeneric(generic_allocator.root(), newPtr); |
| 675 partitionFreeGeneric(genericAllocator.root(), ptr3); | 678 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 676 partitionFreeGeneric(genericAllocator.root(), ptr4); | 679 PartitionFreeGeneric(generic_allocator.root(), ptr4); |
| 677 | 680 |
| 678 // Can we allocate a massive (512MB) size? | 681 // Can we allocate a massive (512MB) size? |
| 679 // Allocate 512MB, but +1, to test for cookie writing alignment issues. | 682 // Allocate 512MB, but +1, to test for cookie writing alignment issues. |
| 680 // Test this only if the device has enough memory or it might fail due | 683 // Test this only if the device has enough memory or it might fail due |
| 681 // to OOM. | 684 // to OOM. |
| 682 if (IsLargeMemoryDevice()) { | 685 if (IsLargeMemoryDevice()) { |
| 683 ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024 + 1, | 686 ptr = PartitionAllocGeneric(generic_allocator.root(), 512 * 1024 * 1024 + 1, |
| 684 typeName); | 687 type_name); |
| 685 partitionFreeGeneric(genericAllocator.root(), ptr); | 688 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 686 } | 689 } |
| 687 | 690 |
| 688 // Check a more reasonable, but still direct mapped, size. | 691 // Check a more reasonable, but still direct mapped, size. |
| 689 // Chop a system page and a byte off to test for rounding errors. | 692 // Chop a system page and a byte off to test for rounding errors. |
| 690 size = 20 * 1024 * 1024; | 693 size = 20 * 1024 * 1024; |
| 691 size -= kSystemPageSize; | 694 size -= kSystemPageSize; |
| 692 size -= 1; | 695 size -= 1; |
| 693 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 696 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 694 char* charPtr = reinterpret_cast<char*>(ptr); | 697 char* charPtr = reinterpret_cast<char*>(ptr); |
| 695 *(charPtr + (size - 1)) = 'A'; | 698 *(charPtr + (size - 1)) = 'A'; |
| 696 partitionFreeGeneric(genericAllocator.root(), ptr); | 699 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 697 | 700 |
| 698 // Can we free null? | 701 // Can we free null? |
| 699 partitionFreeGeneric(genericAllocator.root(), 0); | 702 PartitionFreeGeneric(generic_allocator.root(), 0); |
| 700 | 703 |
| 701 // Do we correctly get a null for a failed allocation? | 704 // Do we correctly get a null for a failed allocation? |
| 702 EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), | 705 EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(), |
| 703 PartitionAllocReturnNull, | 706 PartitionAllocReturnNull, |
| 704 3u * 1024 * 1024 * 1024, typeName)); | 707 3u * 1024 * 1024 * 1024, type_name)); |
| 705 | 708 |
| 706 TestShutdown(); | 709 TestShutdown(); |
| 707 } | 710 } |
| 708 | 711 |
| 709 // Test that we can fetch the real allocated size after an allocation. | 712 // Test that we can fetch the real allocated size after an allocation. |
| 710 TEST(PartitionAllocTest, GenericAllocGetSize) { | 713 TEST(PartitionAllocTest, GenericAllocGetSize) { |
| 711 TestSetup(); | 714 TestSetup(); |
| 712 | 715 |
| 713 void* ptr; | 716 void* ptr; |
| 714 size_t requestedSize, actualSize, predictedSize; | 717 size_t requestedSize, actualSize, predictedSize; |
| 715 | 718 |
| 716 EXPECT_TRUE(partitionAllocSupportsGetSize()); | 719 EXPECT_TRUE(PartitionAllocSupportsGetSize()); |
| 717 | 720 |
| 718 // Allocate something small. | 721 // Allocate something small. |
| 719 requestedSize = 511 - kExtraAllocSize; | 722 requestedSize = 511 - kExtraAllocSize; |
| 720 predictedSize = | 723 predictedSize = |
| 721 partitionAllocActualSize(genericAllocator.root(), requestedSize); | 724 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 722 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); | 725 ptr = |
| 726 PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); |
| 723 EXPECT_TRUE(ptr); | 727 EXPECT_TRUE(ptr); |
| 724 actualSize = partitionAllocGetSize(ptr); | 728 actualSize = PartitionAllocGetSize(ptr); |
| 725 EXPECT_EQ(predictedSize, actualSize); | 729 EXPECT_EQ(predictedSize, actualSize); |
| 726 EXPECT_LT(requestedSize, actualSize); | 730 EXPECT_LT(requestedSize, actualSize); |
| 727 partitionFreeGeneric(genericAllocator.root(), ptr); | 731 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 728 | 732 |
| 729 // Allocate a size that should be a perfect match for a bucket, because it | 733 // Allocate a size that should be a perfect match for a bucket, because it |
| 730 // is an exact power of 2. | 734 // is an exact power of 2. |
| 731 requestedSize = (256 * 1024) - kExtraAllocSize; | 735 requestedSize = (256 * 1024) - kExtraAllocSize; |
| 732 predictedSize = | 736 predictedSize = |
| 733 partitionAllocActualSize(genericAllocator.root(), requestedSize); | 737 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 734 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); | 738 ptr = |
| 739 PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); |
| 735 EXPECT_TRUE(ptr); | 740 EXPECT_TRUE(ptr); |
| 736 actualSize = partitionAllocGetSize(ptr); | 741 actualSize = PartitionAllocGetSize(ptr); |
| 737 EXPECT_EQ(predictedSize, actualSize); | 742 EXPECT_EQ(predictedSize, actualSize); |
| 738 EXPECT_EQ(requestedSize, actualSize); | 743 EXPECT_EQ(requestedSize, actualSize); |
| 739 partitionFreeGeneric(genericAllocator.root(), ptr); | 744 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 740 | 745 |
| 741 // Allocate a size that is a system page smaller than a bucket. GetSize() | 746 // Allocate a size that is a system page smaller than a bucket. GetSize() |
| 742 // should return a larger size than we asked for now. | 747 // should return a larger size than we asked for now. |
| 743 requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize; | 748 requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize; |
| 744 predictedSize = | 749 predictedSize = |
| 745 partitionAllocActualSize(genericAllocator.root(), requestedSize); | 750 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 746 ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); | 751 ptr = |
| 752 PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); |
| 747 EXPECT_TRUE(ptr); | 753 EXPECT_TRUE(ptr); |
| 748 actualSize = partitionAllocGetSize(ptr); | 754 actualSize = PartitionAllocGetSize(ptr); |
| 749 EXPECT_EQ(predictedSize, actualSize); | 755 EXPECT_EQ(predictedSize, actualSize); |
| 750 EXPECT_EQ(requestedSize + kSystemPageSize, actualSize); | 756 EXPECT_EQ(requestedSize + kSystemPageSize, actualSize); |
| 751 // Check that we can write at the end of the reported size too. | 757 // Check that we can write at the end of the reported size too. |
| 752 char* charPtr = reinterpret_cast<char*>(ptr); | 758 char* charPtr = reinterpret_cast<char*>(ptr); |
| 753 *(charPtr + (actualSize - 1)) = 'A'; | 759 *(charPtr + (actualSize - 1)) = 'A'; |
| 754 partitionFreeGeneric(genericAllocator.root(), ptr); | 760 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 755 | 761 |
| 756 // Allocate something very large, and uneven. | 762 // Allocate something very large, and uneven. |
| 757 if (IsLargeMemoryDevice()) { | 763 if (IsLargeMemoryDevice()) { |
| 758 requestedSize = 512 * 1024 * 1024 - 1; | 764 requestedSize = 512 * 1024 * 1024 - 1; |
| 759 predictedSize = | 765 predictedSize = |
| 760 partitionAllocActualSize(genericAllocator.root(), requestedSize); | 766 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 761 ptr = | 767 ptr = PartitionAllocGeneric(generic_allocator.root(), requestedSize, |
| 762 partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); | 768 type_name); |
| 763 EXPECT_TRUE(ptr); | 769 EXPECT_TRUE(ptr); |
| 764 actualSize = partitionAllocGetSize(ptr); | 770 actualSize = PartitionAllocGetSize(ptr); |
| 765 EXPECT_EQ(predictedSize, actualSize); | 771 EXPECT_EQ(predictedSize, actualSize); |
| 766 EXPECT_LT(requestedSize, actualSize); | 772 EXPECT_LT(requestedSize, actualSize); |
| 767 partitionFreeGeneric(genericAllocator.root(), ptr); | 773 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 768 } | 774 } |
| 769 | 775 |
| 770 // Too large allocation. | 776 // Too large allocation. |
| 771 requestedSize = INT_MAX; | 777 requestedSize = INT_MAX; |
| 772 predictedSize = | 778 predictedSize = |
| 773 partitionAllocActualSize(genericAllocator.root(), requestedSize); | 779 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 774 EXPECT_EQ(requestedSize, predictedSize); | 780 EXPECT_EQ(requestedSize, predictedSize); |
| 775 | 781 |
| 776 TestShutdown(); | 782 TestShutdown(); |
| 777 } | 783 } |
| 778 | 784 |
| 779 // Test the realloc() contract. | 785 // Test the realloc() contract. |
| 780 TEST(PartitionAllocTest, Realloc) { | 786 TEST(PartitionAllocTest, Realloc) { |
| 781 TestSetup(); | 787 TestSetup(); |
| 782 | 788 |
| 783 // realloc(0, size) should be equivalent to malloc(). | 789 // realloc(0, size) should be equivalent to malloc(). |
| 784 void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, | 790 void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0, |
| 785 kTestAllocSize, typeName); | 791 kTestAllocSize, type_name); |
| 786 memset(ptr, 'A', kTestAllocSize); | 792 memset(ptr, 'A', kTestAllocSize); |
| 787 PartitionPage* page = | 793 PartitionPage* page = |
| 788 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 794 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 789 // realloc(ptr, 0) should be equivalent to free(). | 795 // realloc(ptr, 0) should be equivalent to free(). |
| 790 void* ptr2 = | 796 void* ptr2 = |
| 791 partitionReallocGeneric(genericAllocator.root(), ptr, 0, typeName); | 797 PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name); |
| 792 EXPECT_EQ(0, ptr2); | 798 EXPECT_EQ(0, ptr2); |
| 793 EXPECT_EQ(partitionCookieFreePointerAdjust(ptr), page->freelistHead); | 799 EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head); |
| 794 | 800 |
| 795 // Test that growing an allocation with realloc() copies everything from the | 801 // Test that growing an allocation with realloc() copies everything from the |
| 796 // old allocation. | 802 // old allocation. |
| 797 size_t size = kSystemPageSize - kExtraAllocSize; | 803 size_t size = kSystemPageSize - kExtraAllocSize; |
| 798 EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); | 804 EXPECT_EQ(size, PartitionAllocActualSize(generic_allocator.root(), size)); |
| 799 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 805 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 800 memset(ptr, 'A', size); | 806 memset(ptr, 'A', size); |
| 801 ptr2 = | 807 ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, size + 1, |
| 802 partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName); | 808 type_name); |
| 803 EXPECT_NE(ptr, ptr2); | 809 EXPECT_NE(ptr, ptr2); |
| 804 char* charPtr2 = static_cast<char*>(ptr2); | 810 char* charPtr2 = static_cast<char*>(ptr2); |
| 805 EXPECT_EQ('A', charPtr2[0]); | 811 EXPECT_EQ('A', charPtr2[0]); |
| 806 EXPECT_EQ('A', charPtr2[size - 1]); | 812 EXPECT_EQ('A', charPtr2[size - 1]); |
| 807 #if DCHECK_IS_ON() | 813 #if DCHECK_IS_ON() |
| 808 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size])); | 814 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size])); |
| 809 #endif | 815 #endif |
| 810 | 816 |
| 811 // Test that shrinking an allocation with realloc() also copies everything | 817 // Test that shrinking an allocation with realloc() also copies everything |
| 812 // from the old allocation. | 818 // from the old allocation. |
| 813 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, | 819 ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, size - 1, |
| 814 typeName); | 820 type_name); |
| 815 EXPECT_NE(ptr2, ptr); | 821 EXPECT_NE(ptr2, ptr); |
| 816 char* charPtr = static_cast<char*>(ptr); | 822 char* charPtr = static_cast<char*>(ptr); |
| 817 EXPECT_EQ('A', charPtr[0]); | 823 EXPECT_EQ('A', charPtr[0]); |
| 818 EXPECT_EQ('A', charPtr[size - 2]); | 824 EXPECT_EQ('A', charPtr[size - 2]); |
| 819 #if DCHECK_IS_ON() | 825 #if DCHECK_IS_ON() |
| 820 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); | 826 EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); |
| 821 #endif | 827 #endif |
| 822 | 828 |
| 823 partitionFreeGeneric(genericAllocator.root(), ptr); | 829 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 824 | 830 |
| 825 // Test that shrinking a direct mapped allocation happens in-place. | 831 // Test that shrinking a direct mapped allocation happens in-place. |
| 826 size = kGenericMaxBucketed + 16 * kSystemPageSize; | 832 size = kGenericMaxBucketed + 16 * kSystemPageSize; |
| 827 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 833 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 828 size_t actualSize = partitionAllocGetSize(ptr); | 834 size_t actualSize = PartitionAllocGetSize(ptr); |
| 829 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, | 835 ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, |
| 830 kGenericMaxBucketed + 8 * kSystemPageSize, | 836 kGenericMaxBucketed + 8 * kSystemPageSize, |
| 831 typeName); | 837 type_name); |
| 832 EXPECT_EQ(ptr, ptr2); | 838 EXPECT_EQ(ptr, ptr2); |
| 833 EXPECT_EQ(actualSize - 8 * kSystemPageSize, partitionAllocGetSize(ptr2)); | 839 EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2)); |
| 834 | 840 |
| 835 // Test that a previously in-place shrunk direct mapped allocation can be | 841 // Test that a previously in-place shrunk direct mapped allocation can be |
| 836 // expanded up again within its original size. | 842 // expanded up again within its original size. |
| 837 ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, | 843 ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, |
| 838 size - kSystemPageSize, typeName); | 844 size - kSystemPageSize, type_name); |
| 839 EXPECT_EQ(ptr2, ptr); | 845 EXPECT_EQ(ptr2, ptr); |
| 840 EXPECT_EQ(actualSize - kSystemPageSize, partitionAllocGetSize(ptr)); | 846 EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr)); |
| 841 | 847 |
| 842 // Test that a direct mapped allocation is performed not in-place when the | 848 // Test that a direct mapped allocation is performed not in-place when the |
| 843 // new size is small enough. | 849 // new size is small enough. |
| 844 ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kSystemPageSize, | 850 ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize, |
| 845 typeName); | 851 type_name); |
| 846 EXPECT_NE(ptr, ptr2); | 852 EXPECT_NE(ptr, ptr2); |
| 847 | 853 |
| 848 partitionFreeGeneric(genericAllocator.root(), ptr2); | 854 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 849 | 855 |
| 850 TestShutdown(); | 856 TestShutdown(); |
| 851 } | 857 } |
| 852 | 858 |
| 853 // Tests the handing out of freelists for partial pages. | 859 // Tests the handing out of freelists for partial pages. |
| 854 TEST(PartitionAllocTest, PartialPageFreelists) { | 860 TEST(PartitionAllocTest, PartialPageFreelists) { |
| 855 TestSetup(); | 861 TestSetup(); |
| 856 | 862 |
| 857 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; | 863 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; |
| 858 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, | 864 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, |
| 859 bigSize + kExtraAllocSize); | 865 big_size + kExtraAllocSize); |
| 860 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift; | 866 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; |
| 861 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; | 867 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 862 EXPECT_EQ(0, bucket->emptyPagesHead); | 868 EXPECT_EQ(0, bucket->empty_pages_head); |
| 863 | 869 |
| 864 void* ptr = partitionAlloc(allocator.root(), bigSize, typeName); | 870 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 865 EXPECT_TRUE(ptr); | 871 EXPECT_TRUE(ptr); |
| 866 | 872 |
| 867 PartitionPage* page = | 873 PartitionPage* page = |
| 868 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 874 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 869 size_t totalSlots = | 875 size_t totalSlots = |
| 870 (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 876 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 871 (bigSize + kExtraAllocSize); | 877 (big_size + kExtraAllocSize); |
| 872 EXPECT_EQ(4u, totalSlots); | 878 EXPECT_EQ(4u, totalSlots); |
| 873 // The freelist should have one entry, because we were able to exactly fit | 879 // The freelist should have one entry, because we were able to exactly fit |
| 874 // one object slot and one freelist pointer (the null that the head points | 880 // one object slot and one freelist pointer (the null that the head points |
| 875 // to) into a system page. | 881 // to) into a system page. |
| 876 EXPECT_TRUE(page->freelistHead); | 882 EXPECT_TRUE(page->freelist_head); |
| 877 EXPECT_EQ(1, page->numAllocatedSlots); | 883 EXPECT_EQ(1, page->num_allocated_slots); |
| 878 EXPECT_EQ(2, page->numUnprovisionedSlots); | 884 EXPECT_EQ(2, page->num_unprovisioned_slots); |
| 879 | 885 |
| 880 void* ptr2 = partitionAlloc(allocator.root(), bigSize, typeName); | 886 void* ptr2 = PartitionAlloc(allocator.root(), big_size, type_name); |
| 881 EXPECT_TRUE(ptr2); | 887 EXPECT_TRUE(ptr2); |
| 882 EXPECT_FALSE(page->freelistHead); | 888 EXPECT_FALSE(page->freelist_head); |
| 883 EXPECT_EQ(2, page->numAllocatedSlots); | 889 EXPECT_EQ(2, page->num_allocated_slots); |
| 884 EXPECT_EQ(2, page->numUnprovisionedSlots); | 890 EXPECT_EQ(2, page->num_unprovisioned_slots); |
| 885 | 891 |
| 886 void* ptr3 = partitionAlloc(allocator.root(), bigSize, typeName); | 892 void* ptr3 = PartitionAlloc(allocator.root(), big_size, type_name); |
| 887 EXPECT_TRUE(ptr3); | 893 EXPECT_TRUE(ptr3); |
| 888 EXPECT_TRUE(page->freelistHead); | 894 EXPECT_TRUE(page->freelist_head); |
| 889 EXPECT_EQ(3, page->numAllocatedSlots); | 895 EXPECT_EQ(3, page->num_allocated_slots); |
| 890 EXPECT_EQ(0, page->numUnprovisionedSlots); | 896 EXPECT_EQ(0, page->num_unprovisioned_slots); |
| 891 | 897 |
| 892 void* ptr4 = partitionAlloc(allocator.root(), bigSize, typeName); | 898 void* ptr4 = PartitionAlloc(allocator.root(), big_size, type_name); |
| 893 EXPECT_TRUE(ptr4); | 899 EXPECT_TRUE(ptr4); |
| 894 EXPECT_FALSE(page->freelistHead); | 900 EXPECT_FALSE(page->freelist_head); |
| 895 EXPECT_EQ(4, page->numAllocatedSlots); | 901 EXPECT_EQ(4, page->num_allocated_slots); |
| 896 EXPECT_EQ(0, page->numUnprovisionedSlots); | 902 EXPECT_EQ(0, page->num_unprovisioned_slots); |
| 897 | 903 |
| 898 void* ptr5 = partitionAlloc(allocator.root(), bigSize, typeName); | 904 void* ptr5 = PartitionAlloc(allocator.root(), big_size, type_name); |
| 899 EXPECT_TRUE(ptr5); | 905 EXPECT_TRUE(ptr5); |
| 900 | 906 |
| 901 PartitionPage* page2 = | 907 PartitionPage* page2 = |
| 902 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr5)); | 908 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr5)); |
| 903 EXPECT_EQ(1, page2->numAllocatedSlots); | 909 EXPECT_EQ(1, page2->num_allocated_slots); |
| 904 | 910 |
| 905 // Churn things a little whilst there's a partial page freelist. | 911 // Churn things a little whilst there's a partial page freelist. |
| 906 partitionFree(ptr); | 912 PartitionFree(ptr); |
| 907 ptr = partitionAlloc(allocator.root(), bigSize, typeName); | 913 ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 908 void* ptr6 = partitionAlloc(allocator.root(), bigSize, typeName); | 914 void* ptr6 = PartitionAlloc(allocator.root(), big_size, type_name); |
| 909 | 915 |
| 910 partitionFree(ptr); | 916 PartitionFree(ptr); |
| 911 partitionFree(ptr2); | 917 PartitionFree(ptr2); |
| 912 partitionFree(ptr3); | 918 PartitionFree(ptr3); |
| 913 partitionFree(ptr4); | 919 PartitionFree(ptr4); |
| 914 partitionFree(ptr5); | 920 PartitionFree(ptr5); |
| 915 partitionFree(ptr6); | 921 PartitionFree(ptr6); |
| 916 EXPECT_NE(-1, page->emptyCacheIndex); | 922 EXPECT_NE(-1, page->empty_cache_index); |
| 917 EXPECT_NE(-1, page2->emptyCacheIndex); | 923 EXPECT_NE(-1, page2->empty_cache_index); |
| 918 EXPECT_TRUE(page2->freelistHead); | 924 EXPECT_TRUE(page2->freelist_head); |
| 919 EXPECT_EQ(0, page2->numAllocatedSlots); | 925 EXPECT_EQ(0, page2->num_allocated_slots); |
| 920 | 926 |
| 921 // And test a couple of sizes that do not cross kSystemPageSize with a single | 927 // And test a couple of sizes that do not cross kSystemPageSize with a single |
| 922 // allocation. | 928 // allocation. |
| 923 size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize; | 929 size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize; |
| 924 bucketIdx = (mediumSize + kExtraAllocSize) >> kBucketShift; | 930 bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift; |
| 925 bucket = &allocator.root()->buckets()[bucketIdx]; | 931 bucket = &allocator.root()->buckets()[bucket_index]; |
| 926 EXPECT_EQ(0, bucket->emptyPagesHead); | 932 EXPECT_EQ(0, bucket->empty_pages_head); |
| 927 | 933 |
| 928 ptr = partitionAlloc(allocator.root(), mediumSize, typeName); | 934 ptr = PartitionAlloc(allocator.root(), mediumSize, type_name); |
| 929 EXPECT_TRUE(ptr); | 935 EXPECT_TRUE(ptr); |
| 930 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 936 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 931 EXPECT_EQ(1, page->numAllocatedSlots); | 937 EXPECT_EQ(1, page->num_allocated_slots); |
| 932 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 938 totalSlots = |
| 933 (mediumSize + kExtraAllocSize); | 939 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 940 (mediumSize + kExtraAllocSize); |
| 934 size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize); | 941 size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize); |
| 935 EXPECT_EQ(2u, firstPageSlots); | 942 EXPECT_EQ(2u, firstPageSlots); |
| 936 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); | 943 EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); |
| 937 | 944 |
| 938 partitionFree(ptr); | 945 PartitionFree(ptr); |
| 939 | 946 |
| 940 size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize; | 947 size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize; |
| 941 bucketIdx = (smallSize + kExtraAllocSize) >> kBucketShift; | 948 bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift; |
| 942 bucket = &allocator.root()->buckets()[bucketIdx]; | 949 bucket = &allocator.root()->buckets()[bucket_index]; |
| 943 EXPECT_EQ(0, bucket->emptyPagesHead); | 950 EXPECT_EQ(0, bucket->empty_pages_head); |
| 944 | 951 |
| 945 ptr = partitionAlloc(allocator.root(), smallSize, typeName); | 952 ptr = PartitionAlloc(allocator.root(), smallSize, type_name); |
| 946 EXPECT_TRUE(ptr); | 953 EXPECT_TRUE(ptr); |
| 947 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 954 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 948 EXPECT_EQ(1, page->numAllocatedSlots); | 955 EXPECT_EQ(1, page->num_allocated_slots); |
| 949 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 956 totalSlots = |
| 950 (smallSize + kExtraAllocSize); | 957 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 958 (smallSize + kExtraAllocSize); |
| 951 firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize); | 959 firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize); |
| 952 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); | 960 EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); |
| 953 | 961 |
| 954 partitionFree(ptr); | 962 PartitionFree(ptr); |
| 955 EXPECT_TRUE(page->freelistHead); | 963 EXPECT_TRUE(page->freelist_head); |
| 956 EXPECT_EQ(0, page->numAllocatedSlots); | 964 EXPECT_EQ(0, page->num_allocated_slots); |
| 957 | 965 |
| 958 size_t verySmallSize = 32 - kExtraAllocSize; | 966 size_t verySmallSize = 32 - kExtraAllocSize; |
| 959 bucketIdx = (verySmallSize + kExtraAllocSize) >> kBucketShift; | 967 bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift; |
| 960 bucket = &allocator.root()->buckets()[bucketIdx]; | 968 bucket = &allocator.root()->buckets()[bucket_index]; |
| 961 EXPECT_EQ(0, bucket->emptyPagesHead); | 969 EXPECT_EQ(0, bucket->empty_pages_head); |
| 962 | 970 |
| 963 ptr = partitionAlloc(allocator.root(), verySmallSize, typeName); | 971 ptr = PartitionAlloc(allocator.root(), verySmallSize, type_name); |
| 964 EXPECT_TRUE(ptr); | 972 EXPECT_TRUE(ptr); |
| 965 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 973 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 966 EXPECT_EQ(1, page->numAllocatedSlots); | 974 EXPECT_EQ(1, page->num_allocated_slots); |
| 967 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 975 totalSlots = |
| 968 (verySmallSize + kExtraAllocSize); | 976 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 977 (verySmallSize + kExtraAllocSize); |
| 969 firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize); | 978 firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize); |
| 970 EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); | 979 EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); |
| 971 | 980 |
| 972 partitionFree(ptr); | 981 PartitionFree(ptr); |
| 973 EXPECT_TRUE(page->freelistHead); | 982 EXPECT_TRUE(page->freelist_head); |
| 974 EXPECT_EQ(0, page->numAllocatedSlots); | 983 EXPECT_EQ(0, page->num_allocated_slots); |
| 975 | 984 |
| 976 // And try an allocation size (against the generic allocator) that is | 985 // And try an allocation size (against the generic allocator) that is |
| 977 // larger than a system page. | 986 // larger than a system page. |
| 978 size_t pageAndAHalfSize = | 987 size_t pageAndAHalfSize = |
| 979 (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize; | 988 (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize; |
| 980 ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize, | 989 ptr = PartitionAllocGeneric(generic_allocator.root(), pageAndAHalfSize, |
| 981 typeName); | 990 type_name); |
| 982 EXPECT_TRUE(ptr); | 991 EXPECT_TRUE(ptr); |
| 983 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 992 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 984 EXPECT_EQ(1, page->numAllocatedSlots); | 993 EXPECT_EQ(1, page->num_allocated_slots); |
| 985 EXPECT_TRUE(page->freelistHead); | 994 EXPECT_TRUE(page->freelist_head); |
| 986 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 995 totalSlots = |
| 987 (pageAndAHalfSize + kExtraAllocSize); | 996 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 988 EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots); | 997 (pageAndAHalfSize + kExtraAllocSize); |
| 989 partitionFreeGeneric(genericAllocator.root(), ptr); | 998 EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots); |
| 999 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 990 | 1000 |
| 991 // And then make sure that exactly the page size only faults one page. | 1001 // And then make sure that exactly the page size only faults one page. |
| 992 size_t pageSize = kSystemPageSize - kExtraAllocSize; | 1002 size_t pageSize = kSystemPageSize - kExtraAllocSize; |
| 993 ptr = partitionAllocGeneric(genericAllocator.root(), pageSize, typeName); | 1003 ptr = PartitionAllocGeneric(generic_allocator.root(), pageSize, type_name); |
| 994 EXPECT_TRUE(ptr); | 1004 EXPECT_TRUE(ptr); |
| 995 page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1005 page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 996 EXPECT_EQ(1, page->numAllocatedSlots); | 1006 EXPECT_EQ(1, page->num_allocated_slots); |
| 997 EXPECT_FALSE(page->freelistHead); | 1007 EXPECT_FALSE(page->freelist_head); |
| 998 totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / | 1008 totalSlots = |
| 999 (pageSize + kExtraAllocSize); | 1009 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 1000 EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots); | 1010 (pageSize + kExtraAllocSize); |
| 1001 partitionFreeGeneric(genericAllocator.root(), ptr); | 1011 EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots); |
| 1012 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1002 | 1013 |
| 1003 TestShutdown(); | 1014 TestShutdown(); |
| 1004 } | 1015 } |
| 1005 | 1016 |
| 1006 // Test some of the fragmentation-resistant properties of the allocator. | 1017 // Test some of the fragmentation-resistant properties of the allocator. |
| 1007 TEST(PartitionAllocTest, PageRefilling) { | 1018 TEST(PartitionAllocTest, PageRefilling) { |
| 1008 TestSetup(); | 1019 TestSetup(); |
| 1009 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 1020 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 1010 | 1021 |
| 1011 // Grab two full pages and a non-full page. | 1022 // Grab two full pages and a non-full page. |
| 1012 PartitionPage* page1 = GetFullPage(kTestAllocSize); | 1023 PartitionPage* page1 = GetFullPage(kTestAllocSize); |
| 1013 PartitionPage* page2 = GetFullPage(kTestAllocSize); | 1024 PartitionPage* page2 = GetFullPage(kTestAllocSize); |
| 1014 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 1025 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1015 EXPECT_TRUE(ptr); | 1026 EXPECT_TRUE(ptr); |
| 1016 EXPECT_NE(page1, bucket->activePagesHead); | 1027 EXPECT_NE(page1, bucket->active_pages_head); |
| 1017 EXPECT_NE(page2, bucket->activePagesHead); | 1028 EXPECT_NE(page2, bucket->active_pages_head); |
| 1018 PartitionPage* page = | 1029 PartitionPage* page = |
| 1019 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1030 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 1020 EXPECT_EQ(1, page->numAllocatedSlots); | 1031 EXPECT_EQ(1, page->num_allocated_slots); |
| 1021 | 1032 |
| 1022 // Work out a pointer into page2 and free it; and then page1 and free it. | 1033 // Work out a pointer into page2 and free it; and then page1 and free it. |
| 1023 char* ptr2 = | 1034 char* ptr2 = |
| 1024 reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; | 1035 reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; |
| 1025 partitionFree(ptr2); | 1036 PartitionFree(ptr2); |
| 1026 ptr2 = | 1037 ptr2 = |
| 1027 reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset; | 1038 reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset; |
| 1028 partitionFree(ptr2); | 1039 PartitionFree(ptr2); |
| 1029 | 1040 |
| 1030 // If we perform two allocations from the same bucket now, we expect to | 1041 // If we perform two allocations from the same bucket now, we expect to |
| 1031 // refill both the nearly full pages. | 1042 // refill both the nearly full pages. |
| 1032 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 1043 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1033 (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 1044 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1034 EXPECT_EQ(1, page->numAllocatedSlots); | 1045 EXPECT_EQ(1, page->num_allocated_slots); |
| 1035 | 1046 |
| 1036 FreeFullPage(page2); | 1047 FreeFullPage(page2); |
| 1037 FreeFullPage(page1); | 1048 FreeFullPage(page1); |
| 1038 partitionFree(ptr); | 1049 PartitionFree(ptr); |
| 1039 | 1050 |
| 1040 TestShutdown(); | 1051 TestShutdown(); |
| 1041 } | 1052 } |
| 1042 | 1053 |
| 1043 // Basic tests to ensure that allocations work for partial page buckets. | 1054 // Basic tests to ensure that allocations work for partial page buckets. |
| 1044 TEST(PartitionAllocTest, PartialPages) { | 1055 TEST(PartitionAllocTest, PartialPages) { |
| 1045 TestSetup(); | 1056 TestSetup(); |
| 1046 | 1057 |
| 1047 // Find a size that is backed by a partial partition page. | 1058 // Find a size that is backed by a partial partition page. |
| 1048 size_t size = sizeof(void*); | 1059 size_t size = sizeof(void*); |
| 1049 PartitionBucket* bucket = 0; | 1060 PartitionBucket* bucket = 0; |
| 1050 while (size < kTestMaxAllocation) { | 1061 while (size < kTestMaxAllocation) { |
| 1051 bucket = &allocator.root()->buckets()[size >> kBucketShift]; | 1062 bucket = &allocator.root()->buckets()[size >> kBucketShift]; |
| 1052 if (bucket->numSystemPagesPerSlotSpan % kNumSystemPagesPerPartitionPage) | 1063 if (bucket->num_system_pages_per_slot_span % |
| 1064 kNumSystemPagesPerPartitionPage) |
| 1053 break; | 1065 break; |
| 1054 size += sizeof(void*); | 1066 size += sizeof(void*); |
| 1055 } | 1067 } |
| 1056 EXPECT_LT(size, kTestMaxAllocation); | 1068 EXPECT_LT(size, kTestMaxAllocation); |
| 1057 | 1069 |
| 1058 PartitionPage* page1 = GetFullPage(size); | 1070 PartitionPage* page1 = GetFullPage(size); |
| 1059 PartitionPage* page2 = GetFullPage(size); | 1071 PartitionPage* page2 = GetFullPage(size); |
| 1060 FreeFullPage(page2); | 1072 FreeFullPage(page2); |
| 1061 FreeFullPage(page1); | 1073 FreeFullPage(page1); |
| 1062 | 1074 |
| 1063 TestShutdown(); | 1075 TestShutdown(); |
| 1064 } | 1076 } |
| 1065 | 1077 |
| 1066 // Test correct handling if our mapping collides with another. | 1078 // Test correct handling if our mapping collides with another. |
| 1067 TEST(PartitionAllocTest, MappingCollision) { | 1079 TEST(PartitionAllocTest, MappingCollision) { |
| 1068 TestSetup(); | 1080 TestSetup(); |
| 1069 // The -2 is because the first and last partition pages in a super page are | 1081 // The -2 is because the first and last partition pages in a super page are |
| 1070 // guard pages. | 1082 // guard pages. |
| 1071 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2; | 1083 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2; |
| 1072 std::unique_ptr<PartitionPage* []> firstSuperPagePages = | 1084 std::unique_ptr<PartitionPage* []> firstSuperPagePages = |
| 1073 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); | 1085 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); |
| 1074 std::unique_ptr<PartitionPage* []> secondSuperPagePages = | 1086 std::unique_ptr<PartitionPage* []> secondSuperPagePages = |
| 1075 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); | 1087 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); |
| 1076 | 1088 |
| 1077 size_t i; | 1089 size_t i; |
| 1078 for (i = 0; i < numPartitionPagesNeeded; ++i) | 1090 for (i = 0; i < numPartitionPagesNeeded; ++i) |
| 1079 firstSuperPagePages[i] = GetFullPage(kTestAllocSize); | 1091 firstSuperPagePages[i] = GetFullPage(kTestAllocSize); |
| 1080 | 1092 |
| 1081 char* pageBase = | 1093 char* pageBase = |
| 1082 reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0])); | 1094 reinterpret_cast<char*>(PartitionPageToPointer(firstSuperPagePages[0])); |
| 1083 EXPECT_EQ(kPartitionPageSize, | 1095 EXPECT_EQ(kPartitionPageSize, |
| 1084 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); | 1096 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); |
| 1085 pageBase -= kPartitionPageSize; | 1097 pageBase -= kPartitionPageSize; |
| 1086 // Map a single system page either side of the mapping for our allocations, | 1098 // Map a single system page either side of the mapping for our allocations, |
| 1087 // with the goal of tripping up alignment of the next mapping. | 1099 // with the goal of tripping up alignment of the next mapping. |
| 1088 void* map1 = allocPages(pageBase - kPageAllocationGranularity, | 1100 void* map1 = AllocPages(pageBase - kPageAllocationGranularity, |
| 1089 kPageAllocationGranularity, | 1101 kPageAllocationGranularity, |
| 1090 kPageAllocationGranularity, PageInaccessible); | 1102 kPageAllocationGranularity, PageInaccessible); |
| 1091 EXPECT_TRUE(map1); | 1103 EXPECT_TRUE(map1); |
| 1092 void* map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, | 1104 void* map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, |
| 1093 kPageAllocationGranularity, PageInaccessible); | 1105 kPageAllocationGranularity, PageInaccessible); |
| 1094 EXPECT_TRUE(map2); | 1106 EXPECT_TRUE(map2); |
| 1095 | 1107 |
| 1096 for (i = 0; i < numPartitionPagesNeeded; ++i) | 1108 for (i = 0; i < numPartitionPagesNeeded; ++i) |
| 1097 secondSuperPagePages[i] = GetFullPage(kTestAllocSize); | 1109 secondSuperPagePages[i] = GetFullPage(kTestAllocSize); |
| 1098 | 1110 |
| 1099 freePages(map1, kPageAllocationGranularity); | 1111 FreePages(map1, kPageAllocationGranularity); |
| 1100 freePages(map2, kPageAllocationGranularity); | 1112 FreePages(map2, kPageAllocationGranularity); |
| 1101 | 1113 |
| 1102 pageBase = | 1114 pageBase = |
| 1103 reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0])); | 1115 reinterpret_cast<char*>(PartitionPageToPointer(secondSuperPagePages[0])); |
| 1104 EXPECT_EQ(kPartitionPageSize, | 1116 EXPECT_EQ(kPartitionPageSize, |
| 1105 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); | 1117 reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); |
| 1106 pageBase -= kPartitionPageSize; | 1118 pageBase -= kPartitionPageSize; |
| 1107 // Map a single system page either side of the mapping for our allocations, | 1119 // Map a single system page either side of the mapping for our allocations, |
| 1108 // with the goal of tripping up alignment of the next mapping. | 1120 // with the goal of tripping up alignment of the next mapping. |
| 1109 map1 = allocPages(pageBase - kPageAllocationGranularity, | 1121 map1 = AllocPages(pageBase - kPageAllocationGranularity, |
| 1110 kPageAllocationGranularity, kPageAllocationGranularity, | 1122 kPageAllocationGranularity, kPageAllocationGranularity, |
| 1111 PageAccessible); | 1123 PageAccessible); |
| 1112 EXPECT_TRUE(map1); | 1124 EXPECT_TRUE(map1); |
| 1113 map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, | 1125 map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, |
| 1114 kPageAllocationGranularity, PageAccessible); | 1126 kPageAllocationGranularity, PageAccessible); |
| 1115 EXPECT_TRUE(map2); | 1127 EXPECT_TRUE(map2); |
| 1116 setSystemPagesInaccessible(map1, kPageAllocationGranularity); | 1128 SetSystemPagesInaccessible(map1, kPageAllocationGranularity); |
| 1117 setSystemPagesInaccessible(map2, kPageAllocationGranularity); | 1129 SetSystemPagesInaccessible(map2, kPageAllocationGranularity); |
| 1118 | 1130 |
| 1119 PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize); | 1131 PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize); |
| 1120 freePages(map1, kPageAllocationGranularity); | 1132 FreePages(map1, kPageAllocationGranularity); |
| 1121 freePages(map2, kPageAllocationGranularity); | 1133 FreePages(map2, kPageAllocationGranularity); |
| 1122 | 1134 |
| 1123 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>( | 1135 EXPECT_EQ(0u, reinterpret_cast<uintptr_t>( |
| 1124 partitionPageToPointer(pageInThirdSuperPage)) & | 1136 PartitionPageToPointer(pageInThirdSuperPage)) & |
| 1125 kPartitionPageOffsetMask); | 1137 kPartitionPageOffsetMask); |
| 1126 | 1138 |
| 1127 // And make sure we really did get a page in a new superpage. | 1139 // And make sure we really did get a page in a new superpage. |
| 1128 EXPECT_NE(reinterpret_cast<uintptr_t>( | 1140 EXPECT_NE(reinterpret_cast<uintptr_t>( |
| 1129 partitionPageToPointer(firstSuperPagePages[0])) & | 1141 PartitionPageToPointer(firstSuperPagePages[0])) & |
| 1130 kSuperPageBaseMask, | 1142 kSuperPageBaseMask, |
| 1131 reinterpret_cast<uintptr_t>( | 1143 reinterpret_cast<uintptr_t>( |
| 1132 partitionPageToPointer(pageInThirdSuperPage)) & | 1144 PartitionPageToPointer(pageInThirdSuperPage)) & |
| 1133 kSuperPageBaseMask); | 1145 kSuperPageBaseMask); |
| 1134 EXPECT_NE(reinterpret_cast<uintptr_t>( | 1146 EXPECT_NE(reinterpret_cast<uintptr_t>( |
| 1135 partitionPageToPointer(secondSuperPagePages[0])) & | 1147 PartitionPageToPointer(secondSuperPagePages[0])) & |
| 1136 kSuperPageBaseMask, | 1148 kSuperPageBaseMask, |
| 1137 reinterpret_cast<uintptr_t>( | 1149 reinterpret_cast<uintptr_t>( |
| 1138 partitionPageToPointer(pageInThirdSuperPage)) & | 1150 PartitionPageToPointer(pageInThirdSuperPage)) & |
| 1139 kSuperPageBaseMask); | 1151 kSuperPageBaseMask); |
| 1140 | 1152 |
| 1141 FreeFullPage(pageInThirdSuperPage); | 1153 FreeFullPage(pageInThirdSuperPage); |
| 1142 for (i = 0; i < numPartitionPagesNeeded; ++i) { | 1154 for (i = 0; i < numPartitionPagesNeeded; ++i) { |
| 1143 FreeFullPage(firstSuperPagePages[i]); | 1155 FreeFullPage(firstSuperPagePages[i]); |
| 1144 FreeFullPage(secondSuperPagePages[i]); | 1156 FreeFullPage(secondSuperPagePages[i]); |
| 1145 } | 1157 } |
| 1146 | 1158 |
| 1147 TestShutdown(); | 1159 TestShutdown(); |
| 1148 } | 1160 } |
| 1149 | 1161 |
| 1150 // Tests that pages in the free page cache do get freed as appropriate. | 1162 // Tests that pages in the free page cache do get freed as appropriate. |
| 1151 TEST(PartitionAllocTest, FreeCache) { | 1163 TEST(PartitionAllocTest, FreeCache) { |
| 1152 TestSetup(); | 1164 TestSetup(); |
| 1153 | 1165 |
| 1154 EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages); | 1166 EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages); |
| 1155 | 1167 |
| 1156 size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; | 1168 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; |
| 1157 size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift; | 1169 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; |
| 1158 PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; | 1170 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 1159 | 1171 |
| 1160 void* ptr = partitionAlloc(allocator.root(), bigSize, typeName); | 1172 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 1161 EXPECT_TRUE(ptr); | 1173 EXPECT_TRUE(ptr); |
| 1162 PartitionPage* page = | 1174 PartitionPage* page = |
| 1163 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1175 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 1164 EXPECT_EQ(0, bucket->emptyPagesHead); | 1176 EXPECT_EQ(0, bucket->empty_pages_head); |
| 1165 EXPECT_EQ(1, page->numAllocatedSlots); | 1177 EXPECT_EQ(1, page->num_allocated_slots); |
| 1166 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages); | 1178 EXPECT_EQ(kPartitionPageSize, |
| 1167 partitionFree(ptr); | 1179 allocator.root()->total_size_of_committed_pages); |
| 1168 EXPECT_EQ(0, page->numAllocatedSlots); | 1180 PartitionFree(ptr); |
| 1169 EXPECT_NE(-1, page->emptyCacheIndex); | 1181 EXPECT_EQ(0, page->num_allocated_slots); |
| 1170 EXPECT_TRUE(page->freelistHead); | 1182 EXPECT_NE(-1, page->empty_cache_index); |
| 1183 EXPECT_TRUE(page->freelist_head); |
| 1171 | 1184 |
| 1172 CycleFreeCache(kTestAllocSize); | 1185 CycleFreeCache(kTestAllocSize); |
| 1173 | 1186 |
| 1174 // Flushing the cache should have really freed the unused page. | 1187 // Flushing the cache should have really freed the unused page. |
| 1175 EXPECT_FALSE(page->freelistHead); | 1188 EXPECT_FALSE(page->freelist_head); |
| 1176 EXPECT_EQ(-1, page->emptyCacheIndex); | 1189 EXPECT_EQ(-1, page->empty_cache_index); |
| 1177 EXPECT_EQ(0, page->numAllocatedSlots); | 1190 EXPECT_EQ(0, page->num_allocated_slots); |
| 1178 PartitionBucket* cycleFreeCacheBucket = | 1191 PartitionBucket* cycle_free_cache_bucket = |
| 1179 &allocator.root()->buckets()[kTestBucketIndex]; | 1192 &allocator.root()->buckets()[kTestBucketIndex]; |
| 1180 EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * kSystemPageSize, | 1193 EXPECT_EQ( |
| 1181 allocator.root()->totalSizeOfCommittedPages); | 1194 cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize, |
| 1195 allocator.root()->total_size_of_committed_pages); |
| 1182 | 1196 |
| 1183 // Check that an allocation works ok whilst in this state (a free'd page | 1197 // Check that an allocation works ok whilst in this state (a free'd page |
| 1184 // as the active pages head). | 1198 // as the active pages head). |
| 1185 ptr = partitionAlloc(allocator.root(), bigSize, typeName); | 1199 ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 1186 EXPECT_FALSE(bucket->emptyPagesHead); | 1200 EXPECT_FALSE(bucket->empty_pages_head); |
| 1187 partitionFree(ptr); | 1201 PartitionFree(ptr); |
| 1188 | 1202 |
| 1189 // Also check that a page that is bouncing immediately between empty and | 1203 // Also check that a page that is bouncing immediately between empty and |
| 1190 // used does not get freed. | 1204 // used does not get freed. |
| 1191 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) { | 1205 for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) { |
| 1192 ptr = partitionAlloc(allocator.root(), bigSize, typeName); | 1206 ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 1193 EXPECT_TRUE(page->freelistHead); | 1207 EXPECT_TRUE(page->freelist_head); |
| 1194 partitionFree(ptr); | 1208 PartitionFree(ptr); |
| 1195 EXPECT_TRUE(page->freelistHead); | 1209 EXPECT_TRUE(page->freelist_head); |
| 1196 } | 1210 } |
| 1197 EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages); | 1211 EXPECT_EQ(kPartitionPageSize, |
| 1212 allocator.root()->total_size_of_committed_pages); |
| 1198 TestShutdown(); | 1213 TestShutdown(); |
| 1199 } | 1214 } |
| 1200 | 1215 |
| 1201 // Tests for a bug we had with losing references to free pages. | 1216 // Tests for a bug we had with losing references to free pages. |
| 1202 TEST(PartitionAllocTest, LostFreePagesBug) { | 1217 TEST(PartitionAllocTest, LostFreePagesBug) { |
| 1203 TestSetup(); | 1218 TestSetup(); |
| 1204 | 1219 |
| 1205 size_t size = kPartitionPageSize - kExtraAllocSize; | 1220 size_t size = kPartitionPageSize - kExtraAllocSize; |
| 1206 | 1221 |
| 1207 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1222 void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1208 EXPECT_TRUE(ptr); | 1223 EXPECT_TRUE(ptr); |
| 1209 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1224 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1210 EXPECT_TRUE(ptr2); | 1225 EXPECT_TRUE(ptr2); |
| 1211 | 1226 |
| 1212 PartitionPage* page = | 1227 PartitionPage* page = |
| 1213 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1228 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 1214 PartitionPage* page2 = | 1229 PartitionPage* page2 = |
| 1215 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr2)); | 1230 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr2)); |
| 1216 PartitionBucket* bucket = page->bucket; | 1231 PartitionBucket* bucket = page->bucket; |
| 1217 | 1232 |
| 1218 EXPECT_EQ(0, bucket->emptyPagesHead); | 1233 EXPECT_EQ(0, bucket->empty_pages_head); |
| 1219 EXPECT_EQ(-1, page->numAllocatedSlots); | 1234 EXPECT_EQ(-1, page->num_allocated_slots); |
| 1220 EXPECT_EQ(1, page2->numAllocatedSlots); | 1235 EXPECT_EQ(1, page2->num_allocated_slots); |
| 1221 | 1236 |
| 1222 partitionFreeGeneric(genericAllocator.root(), ptr); | 1237 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1223 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1238 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1224 | 1239 |
| 1225 EXPECT_TRUE(bucket->emptyPagesHead); | 1240 EXPECT_TRUE(bucket->empty_pages_head); |
| 1226 EXPECT_TRUE(bucket->emptyPagesHead->nextPage); | 1241 EXPECT_TRUE(bucket->empty_pages_head->next_page); |
| 1227 EXPECT_EQ(0, page->numAllocatedSlots); | 1242 EXPECT_EQ(0, page->num_allocated_slots); |
| 1228 EXPECT_EQ(0, page2->numAllocatedSlots); | 1243 EXPECT_EQ(0, page2->num_allocated_slots); |
| 1229 EXPECT_TRUE(page->freelistHead); | 1244 EXPECT_TRUE(page->freelist_head); |
| 1230 EXPECT_TRUE(page2->freelistHead); | 1245 EXPECT_TRUE(page2->freelist_head); |
| 1231 | 1246 |
| 1232 CycleGenericFreeCache(kTestAllocSize); | 1247 CycleGenericFreeCache(kTestAllocSize); |
| 1233 | 1248 |
| 1234 EXPECT_FALSE(page->freelistHead); | 1249 EXPECT_FALSE(page->freelist_head); |
| 1235 EXPECT_FALSE(page2->freelistHead); | 1250 EXPECT_FALSE(page2->freelist_head); |
| 1236 | 1251 |
| 1237 EXPECT_TRUE(bucket->emptyPagesHead); | 1252 EXPECT_TRUE(bucket->empty_pages_head); |
| 1238 EXPECT_TRUE(bucket->emptyPagesHead->nextPage); | 1253 EXPECT_TRUE(bucket->empty_pages_head->next_page); |
| 1239 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 1254 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 1240 | 1255 |
| 1241 // At this moment, we have two decommitted pages, on the empty list. | 1256 // At this moment, we have two decommitted pages, on the empty list. |
| 1242 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1257 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1243 EXPECT_TRUE(ptr); | 1258 EXPECT_TRUE(ptr); |
| 1244 partitionFreeGeneric(genericAllocator.root(), ptr); | 1259 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1245 | 1260 |
| 1246 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); | 1261 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 1247 EXPECT_TRUE(bucket->emptyPagesHead); | 1262 EXPECT_TRUE(bucket->empty_pages_head); |
| 1248 EXPECT_TRUE(bucket->decommittedPagesHead); | 1263 EXPECT_TRUE(bucket->decommitted_pages_head); |
| 1249 | 1264 |
| 1250 CycleGenericFreeCache(kTestAllocSize); | 1265 CycleGenericFreeCache(kTestAllocSize); |
| 1251 | 1266 |
| 1252 // We're now set up to trigger a historical bug by scanning over the active | 1267 // We're now set up to trigger a historical bug by scanning over the active |
| 1253 // pages list. The current code gets into a different state, but we'll keep | 1268 // pages list. The current code gets into a different state, but we'll keep |
| 1254 // the test as being an interesting corner case. | 1269 // the test as being an interesting corner case. |
| 1255 ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1270 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1256 EXPECT_TRUE(ptr); | 1271 EXPECT_TRUE(ptr); |
| 1257 partitionFreeGeneric(genericAllocator.root(), ptr); | 1272 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1258 | 1273 |
| 1259 EXPECT_TRUE(bucket->activePagesHead); | 1274 EXPECT_TRUE(bucket->active_pages_head); |
| 1260 EXPECT_TRUE(bucket->emptyPagesHead); | 1275 EXPECT_TRUE(bucket->empty_pages_head); |
| 1261 EXPECT_TRUE(bucket->decommittedPagesHead); | 1276 EXPECT_TRUE(bucket->decommitted_pages_head); |
| 1262 | 1277 |
| 1263 TestShutdown(); | 1278 TestShutdown(); |
| 1264 } | 1279 } |
| 1265 | 1280 |
| 1266 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) | 1281 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
| 1267 | 1282 |
| 1268 static void DoReturnNullTest(size_t allocSize) { | 1283 static void DoReturnNullTest(size_t allocSize) { |
| 1269 // TODO(crbug.com/678782): Where necessary and possible, disable the | 1284 // TODO(crbug.com/678782): Where necessary and possible, disable the |
| 1270 // platform's OOM-killing behavior. OOM-killing makes this test flaky on | 1285 // platform's OOM-killing behavior. OOM-killing makes this test flaky on |
| 1271 // low-memory devices. | 1286 // low-memory devices. |
| 1272 if (!IsLargeMemoryDevice()) { | 1287 if (!IsLargeMemoryDevice()) { |
| 1273 LOG(WARNING) << "Skipping test on this device because of crbug.com/678782"; | 1288 LOG(WARNING) << "Skipping test on this device because of crbug.com/678782"; |
| 1274 return; | 1289 return; |
| 1275 } | 1290 } |
| 1276 | 1291 |
| 1277 TestSetup(); | 1292 TestSetup(); |
| 1278 | 1293 |
| 1279 EXPECT_TRUE(SetAddressSpaceLimit()); | 1294 EXPECT_TRUE(SetAddressSpaceLimit()); |
| 1280 | 1295 |
| 1281 // Work out the number of allocations for 6 GB of memory. | 1296 // Work out the number of allocations for 6 GB of memory. |
| 1282 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); | 1297 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); |
| 1283 | 1298 |
| 1284 void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric( | 1299 void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric( |
| 1285 genericAllocator.root(), numAllocations * sizeof(void*), typeName)); | 1300 generic_allocator.root(), numAllocations * sizeof(void*), type_name)); |
| 1286 int i; | 1301 int i; |
| 1287 | 1302 |
| 1288 for (i = 0; i < numAllocations; ++i) { | 1303 for (i = 0; i < numAllocations; ++i) { |
| 1289 ptrs[i] = partitionAllocGenericFlags( | 1304 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 1290 genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName); | 1305 PartitionAllocReturnNull, allocSize, |
| 1306 type_name); |
| 1291 if (!i) | 1307 if (!i) |
| 1292 EXPECT_TRUE(ptrs[0]); | 1308 EXPECT_TRUE(ptrs[0]); |
| 1293 if (!ptrs[i]) { | 1309 if (!ptrs[i]) { |
| 1294 ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), | 1310 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 1295 PartitionAllocReturnNull, allocSize, | 1311 PartitionAllocReturnNull, allocSize, |
| 1296 typeName); | 1312 type_name); |
| 1297 EXPECT_FALSE(ptrs[i]); | 1313 EXPECT_FALSE(ptrs[i]); |
| 1298 break; | 1314 break; |
| 1299 } | 1315 } |
| 1300 } | 1316 } |
| 1301 | 1317 |
| 1302 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then | 1318 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then |
| 1303 // we're not actually testing anything here. | 1319 // we're not actually testing anything here. |
| 1304 EXPECT_LT(i, numAllocations); | 1320 EXPECT_LT(i, numAllocations); |
| 1305 | 1321 |
| 1306 // Free, reallocate and free again each block we allocated. We do this to | 1322 // Free, reallocate and free again each block we allocated. We do this to |
| 1307 // check that freeing memory also works correctly after a failed allocation. | 1323 // check that freeing memory also works correctly after a failed allocation. |
| 1308 for (--i; i >= 0; --i) { | 1324 for (--i; i >= 0; --i) { |
| 1309 partitionFreeGeneric(genericAllocator.root(), ptrs[i]); | 1325 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); |
| 1310 ptrs[i] = partitionAllocGenericFlags( | 1326 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 1311 genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName); | 1327 PartitionAllocReturnNull, allocSize, |
| 1328 type_name); |
| 1312 EXPECT_TRUE(ptrs[i]); | 1329 EXPECT_TRUE(ptrs[i]); |
| 1313 partitionFreeGeneric(genericAllocator.root(), ptrs[i]); | 1330 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); |
| 1314 } | 1331 } |
| 1315 | 1332 |
| 1316 partitionFreeGeneric(genericAllocator.root(), ptrs); | 1333 PartitionFreeGeneric(generic_allocator.root(), ptrs); |
| 1317 | 1334 |
| 1318 EXPECT_TRUE(ClearAddressSpaceLimit()); | 1335 EXPECT_TRUE(ClearAddressSpaceLimit()); |
| 1319 | 1336 |
| 1320 TestShutdown(); | 1337 TestShutdown(); |
| 1321 } | 1338 } |
| 1322 | 1339 |
| 1323 // Tests that if an allocation fails in "return null" mode, repeating it doesn't | 1340 // Tests that if an allocation fails in "return null" mode, repeating it doesn't |
| 1324 // crash, and still returns null. The test tries to allocate 6 GB of memory in | 1341 // crash, and still returns null. The test tries to allocate 6 GB of memory in |
| 1325 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 6 GB | 1342 // 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 6 GB |
| 1326 // using setrlimit() first. | 1343 // using setrlimit() first. |
| (...skipping 28 matching lines...) Expand all Loading... |
| 1355 | 1372 |
| 1356 // Death tests misbehave on Android, http://crbug.com/643760. | 1373 // Death tests misbehave on Android, http://crbug.com/643760. |
| 1357 #if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) | 1374 #if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) |
| 1358 | 1375 |
| 1359 // Make sure that malloc(-1) dies. | 1376 // Make sure that malloc(-1) dies. |
| 1360 // In the past, we had an integer overflow that would alias malloc(-1) to | 1377 // In the past, we had an integer overflow that would alias malloc(-1) to |
| 1361 // malloc(0), which is not good. | 1378 // malloc(0), which is not good. |
| 1362 TEST(PartitionAllocDeathTest, LargeAllocs) { | 1379 TEST(PartitionAllocDeathTest, LargeAllocs) { |
| 1363 TestSetup(); | 1380 TestSetup(); |
| 1364 // Largest alloc. | 1381 // Largest alloc. |
| 1365 EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), | 1382 EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(), |
| 1366 static_cast<size_t>(-1), typeName), | 1383 static_cast<size_t>(-1), type_name), |
| 1367 ""); | 1384 ""); |
| 1368 // And the smallest allocation we expect to die. | 1385 // And the smallest allocation we expect to die. |
| 1369 EXPECT_DEATH( | 1386 EXPECT_DEATH( |
| 1370 partitionAllocGeneric(genericAllocator.root(), | 1387 PartitionAllocGeneric(generic_allocator.root(), |
| 1371 static_cast<size_t>(INT_MAX) + 1, typeName), | 1388 static_cast<size_t>(INT_MAX) + 1, type_name), |
| 1372 ""); | 1389 ""); |
| 1373 | 1390 |
| 1374 TestShutdown(); | 1391 TestShutdown(); |
| 1375 } | 1392 } |
| 1376 | 1393 |
| 1377 // Check that our immediate double-free detection works. | 1394 // Check that our immediate double-free detection works. |
| 1378 TEST(PartitionAllocDeathTest, ImmediateDoubleFree) { | 1395 TEST(PartitionAllocDeathTest, ImmediateDoubleFree) { |
| 1379 TestSetup(); | 1396 TestSetup(); |
| 1380 | 1397 |
| 1381 void* ptr = | 1398 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1382 partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); | 1399 type_name); |
| 1383 EXPECT_TRUE(ptr); | 1400 EXPECT_TRUE(ptr); |
| 1384 partitionFreeGeneric(genericAllocator.root(), ptr); | 1401 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1385 | 1402 |
| 1386 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), ""); | 1403 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); |
| 1387 | 1404 |
| 1388 TestShutdown(); | 1405 TestShutdown(); |
| 1389 } | 1406 } |
| 1390 | 1407 |
| 1391 // Check that our refcount-based double-free detection works. | 1408 // Check that our refcount-based double-free detection works. |
| 1392 TEST(PartitionAllocDeathTest, RefcountDoubleFree) { | 1409 TEST(PartitionAllocDeathTest, RefcountDoubleFree) { |
| 1393 TestSetup(); | 1410 TestSetup(); |
| 1394 | 1411 |
| 1395 void* ptr = | 1412 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1396 partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); | 1413 type_name); |
| 1397 EXPECT_TRUE(ptr); | 1414 EXPECT_TRUE(ptr); |
| 1398 void* ptr2 = | 1415 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1399 partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); | 1416 type_name); |
| 1400 EXPECT_TRUE(ptr2); | 1417 EXPECT_TRUE(ptr2); |
| 1401 partitionFreeGeneric(genericAllocator.root(), ptr); | 1418 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1402 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1419 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1403 // This is not an immediate double-free so our immediate detection won't | 1420 // This is not an immediate double-free so our immediate detection won't |
| 1404 // fire. However, it does take the "refcount" of the partition page to -1, | 1421 // fire. However, it does take the "refcount" of the partition page to -1, |
| 1405 // which is illegal and should be trapped. | 1422 // which is illegal and should be trapped. |
| 1406 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), ""); | 1423 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); |
| 1407 | 1424 |
| 1408 TestShutdown(); | 1425 TestShutdown(); |
| 1409 } | 1426 } |
| 1410 | 1427 |
| 1411 // Check that guard pages are present where expected. | 1428 // Check that guard pages are present where expected. |
| 1412 TEST(PartitionAllocDeathTest, GuardPages) { | 1429 TEST(PartitionAllocDeathTest, GuardPages) { |
| 1413 TestSetup(); | 1430 TestSetup(); |
| 1414 | 1431 |
| 1415 // partitionAlloc adds kPartitionPageSize to the requested size | 1432 // PartitionAlloc adds kPartitionPageSize to the requested size |
| 1416 // (for metadata), and then rounds that size to kPageAllocationGranularity. | 1433 // (for metadata), and then rounds that size to kPageAllocationGranularity. |
| 1417 // To be able to reliably write one past a direct allocation, choose a size | 1434 // To be able to reliably write one past a direct allocation, choose a size |
| 1418 // that's | 1435 // that's |
| 1419 // a) larger than kGenericMaxBucketed (to make the allocation direct) | 1436 // a) larger than kGenericMaxBucketed (to make the allocation direct) |
| 1420 // b) aligned at kPageAllocationGranularity boundaries after | 1437 // b) aligned at kPageAllocationGranularity boundaries after |
| 1421 // kPartitionPageSize has been added to it. | 1438 // kPartitionPageSize has been added to it. |
| 1422 // (On 32-bit, partitionAlloc adds another kSystemPageSize to the | 1439 // (On 32-bit, PartitionAlloc adds another kSystemPageSize to the |
| 1423 // allocation size before rounding, but there it marks the memory right | 1440 // allocation size before rounding, but there it marks the memory right |
| 1424 // after size as inaccessible, so it's fine to write 1 past the size we | 1441 // after size as inaccessible, so it's fine to write 1 past the size we |
| 1425 // hand to partitionAlloc and we don't need to worry about allocation | 1442 // hand to PartitionAlloc and we don't need to worry about allocation |
| 1426 // granularities.) | 1443 // granularities.) |
| 1427 #define ALIGN(N, A) (((N) + (A)-1) / (A) * (A)) | 1444 #define ALIGN(N, A) (((N) + (A)-1) / (A) * (A)) |
| 1428 const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize, | 1445 const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize, |
| 1429 kPageAllocationGranularity) - | 1446 kPageAllocationGranularity) - |
| 1430 kPartitionPageSize; | 1447 kPartitionPageSize; |
| 1431 #undef ALIGN | 1448 #undef ALIGN |
| 1432 static_assert(kSize > kGenericMaxBucketed, | 1449 static_assert(kSize > kGenericMaxBucketed, |
| 1433 "allocation not large enough for direct allocation"); | 1450 "allocation not large enough for direct allocation"); |
| 1434 size_t size = kSize - kExtraAllocSize; | 1451 size_t size = kSize - kExtraAllocSize; |
| 1435 void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1452 void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1436 | 1453 |
| 1437 EXPECT_TRUE(ptr); | 1454 EXPECT_TRUE(ptr); |
| 1438 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; | 1455 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; |
| 1439 | 1456 |
| 1440 EXPECT_DEATH(*(charPtr - 1) = 'A', ""); | 1457 EXPECT_DEATH(*(charPtr - 1) = 'A', ""); |
| 1441 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); | 1458 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); |
| 1442 | 1459 |
| 1443 partitionFreeGeneric(genericAllocator.root(), ptr); | 1460 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1444 | 1461 |
| 1445 TestShutdown(); | 1462 TestShutdown(); |
| 1446 } | 1463 } |
| 1447 | 1464 |
| 1448 // Check that a bad free() is caught where the free() refers to an unused | 1465 // Check that a bad free() is caught where the free() refers to an unused |
| 1449 // partition page of a large allocation. | 1466 // partition page of a large allocation. |
| 1450 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage) { | 1467 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage) { |
| 1451 TestSetup(); | 1468 TestSetup(); |
| 1452 | 1469 |
| 1453 // This large size will result in a direct mapped allocation with guard | 1470 // This large size will result in a direct mapped allocation with guard |
| 1454 // pages at either end. | 1471 // pages at either end. |
| 1455 void* ptr = partitionAllocGeneric(genericAllocator.root(), | 1472 void* ptr = PartitionAllocGeneric(generic_allocator.root(), |
| 1456 kPartitionPageSize * 2, typeName); | 1473 kPartitionPageSize * 2, type_name); |
| 1457 EXPECT_TRUE(ptr); | 1474 EXPECT_TRUE(ptr); |
| 1458 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; | 1475 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; |
| 1459 | 1476 |
| 1460 EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); | 1477 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), ""); |
| 1461 | 1478 |
| 1462 partitionFreeGeneric(genericAllocator.root(), ptr); | 1479 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1463 | 1480 |
| 1464 TestShutdown(); | 1481 TestShutdown(); |
| 1465 } | 1482 } |
| 1466 | 1483 |
| 1467 #endif // !defined(OS_ANDROID) && !defined(OS_IOS) | 1484 #endif // !defined(OS_ANDROID) && !defined(OS_IOS) |
| 1468 | 1485 |
| 1469 // Tests that partitionDumpStatsGeneric and partitionDumpStats runs without | 1486 // Tests that PartitionDumpStatsGeneric and PartitionDumpStats runs without |
| 1470 // crashing and returns non zero values when memory is allocated. | 1487 // crashing and returns non zero values when memory is allocated. |
| 1471 TEST(PartitionAllocTest, DumpMemoryStats) { | 1488 TEST(PartitionAllocTest, DumpMemoryStats) { |
| 1472 TestSetup(); | 1489 TestSetup(); |
| 1473 { | 1490 { |
| 1474 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); | 1491 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1475 MockPartitionStatsDumper mockStatsDumper; | 1492 MockPartitionStatsDumper mockStatsDumper; |
| 1476 partitionDumpStats(allocator.root(), "mock_allocator", | 1493 PartitionDumpStats(allocator.root(), "mock_allocator", |
| 1477 false /* detailed dump */, &mockStatsDumper); | 1494 false /* detailed dump */, &mockStatsDumper); |
| 1478 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded()); | 1495 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded()); |
| 1479 | 1496 |
| 1480 partitionFree(ptr); | 1497 PartitionFree(ptr); |
| 1481 } | 1498 } |
| 1482 | 1499 |
| 1483 // This series of tests checks the active -> empty -> decommitted states. | 1500 // This series of tests checks the active -> empty -> decommitted states. |
| 1484 { | 1501 { |
| 1485 void* genericPtr = partitionAllocGeneric(genericAllocator.root(), | 1502 void* genericPtr = PartitionAllocGeneric(generic_allocator.root(), |
| 1486 2048 - kExtraAllocSize, typeName); | 1503 2048 - kExtraAllocSize, type_name); |
| 1487 { | 1504 { |
| 1488 MockPartitionStatsDumper mockStatsDumperGeneric; | 1505 MockPartitionStatsDumper dumper; |
| 1489 partitionDumpStatsGeneric( | 1506 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1490 genericAllocator.root(), "mock_generic_allocator", | 1507 "mock_generic_allocator", |
| 1491 false /* detailed dump */, &mockStatsDumperGeneric); | 1508 false /* detailed dump */, &dumper); |
| 1492 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1509 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1493 | 1510 |
| 1494 const PartitionBucketMemoryStats* stats = | 1511 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1495 mockStatsDumperGeneric.GetBucketStats(2048); | 1512 EXPECT_TRUE(stats); |
| 1496 EXPECT_TRUE(stats); | 1513 EXPECT_TRUE(stats->is_valid); |
| 1497 EXPECT_TRUE(stats->isValid); | 1514 EXPECT_EQ(2048u, stats->bucket_slot_size); |
| 1498 EXPECT_EQ(2048u, stats->bucketSlotSize); | 1515 EXPECT_EQ(2048u, stats->active_bytes); |
| 1499 EXPECT_EQ(2048u, stats->activeBytes); | 1516 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); |
| 1500 EXPECT_EQ(kSystemPageSize, stats->residentBytes); | 1517 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1501 EXPECT_EQ(0u, stats->decommittableBytes); | 1518 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1502 EXPECT_EQ(0u, stats->discardableBytes); | 1519 EXPECT_EQ(0u, stats->num_full_pages); |
| 1503 EXPECT_EQ(0u, stats->numFullPages); | 1520 EXPECT_EQ(1u, stats->num_active_pages); |
| 1504 EXPECT_EQ(1u, stats->numActivePages); | 1521 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1505 EXPECT_EQ(0u, stats->numEmptyPages); | 1522 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1506 EXPECT_EQ(0u, stats->numDecommittedPages); | 1523 } |
| 1507 } | 1524 |
| 1508 | 1525 PartitionFreeGeneric(generic_allocator.root(), genericPtr); |
| 1509 partitionFreeGeneric(genericAllocator.root(), genericPtr); | 1526 |
| 1510 | 1527 { |
| 1511 { | 1528 MockPartitionStatsDumper dumper; |
| 1512 MockPartitionStatsDumper mockStatsDumperGeneric; | 1529 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1513 partitionDumpStatsGeneric( | 1530 "mock_generic_allocator", |
| 1514 genericAllocator.root(), "mock_generic_allocator", | 1531 false /* detailed dump */, &dumper); |
| 1515 false /* detailed dump */, &mockStatsDumperGeneric); | 1532 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1516 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1533 |
| 1517 | 1534 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1518 const PartitionBucketMemoryStats* stats = | 1535 EXPECT_TRUE(stats); |
| 1519 mockStatsDumperGeneric.GetBucketStats(2048); | 1536 EXPECT_TRUE(stats->is_valid); |
| 1520 EXPECT_TRUE(stats); | 1537 EXPECT_EQ(2048u, stats->bucket_slot_size); |
| 1521 EXPECT_TRUE(stats->isValid); | 1538 EXPECT_EQ(0u, stats->active_bytes); |
| 1522 EXPECT_EQ(2048u, stats->bucketSlotSize); | 1539 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); |
| 1523 EXPECT_EQ(0u, stats->activeBytes); | 1540 EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); |
| 1524 EXPECT_EQ(kSystemPageSize, stats->residentBytes); | 1541 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1525 EXPECT_EQ(kSystemPageSize, stats->decommittableBytes); | 1542 EXPECT_EQ(0u, stats->num_full_pages); |
| 1526 EXPECT_EQ(0u, stats->discardableBytes); | 1543 EXPECT_EQ(0u, stats->num_active_pages); |
| 1527 EXPECT_EQ(0u, stats->numFullPages); | 1544 EXPECT_EQ(1u, stats->num_empty_pages); |
| 1528 EXPECT_EQ(0u, stats->numActivePages); | 1545 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1529 EXPECT_EQ(1u, stats->numEmptyPages); | |
| 1530 EXPECT_EQ(0u, stats->numDecommittedPages); | |
| 1531 } | 1546 } |
| 1532 | 1547 |
| 1533 CycleGenericFreeCache(kTestAllocSize); | 1548 CycleGenericFreeCache(kTestAllocSize); |
| 1534 | 1549 |
| 1535 { | 1550 { |
| 1536 MockPartitionStatsDumper mockStatsDumperGeneric; | 1551 MockPartitionStatsDumper dumper; |
| 1537 partitionDumpStatsGeneric( | 1552 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1538 genericAllocator.root(), "mock_generic_allocator", | 1553 "mock_generic_allocator", |
| 1539 false /* detailed dump */, &mockStatsDumperGeneric); | 1554 false /* detailed dump */, &dumper); |
| 1540 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1555 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1541 | 1556 |
| 1542 const PartitionBucketMemoryStats* stats = | 1557 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1543 mockStatsDumperGeneric.GetBucketStats(2048); | 1558 EXPECT_TRUE(stats); |
| 1544 EXPECT_TRUE(stats); | 1559 EXPECT_TRUE(stats->is_valid); |
| 1545 EXPECT_TRUE(stats->isValid); | 1560 EXPECT_EQ(2048u, stats->bucket_slot_size); |
| 1546 EXPECT_EQ(2048u, stats->bucketSlotSize); | 1561 EXPECT_EQ(0u, stats->active_bytes); |
| 1547 EXPECT_EQ(0u, stats->activeBytes); | 1562 EXPECT_EQ(0u, stats->resident_bytes); |
| 1548 EXPECT_EQ(0u, stats->residentBytes); | 1563 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1549 EXPECT_EQ(0u, stats->decommittableBytes); | 1564 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1550 EXPECT_EQ(0u, stats->discardableBytes); | 1565 EXPECT_EQ(0u, stats->num_full_pages); |
| 1551 EXPECT_EQ(0u, stats->numFullPages); | 1566 EXPECT_EQ(0u, stats->num_active_pages); |
| 1552 EXPECT_EQ(0u, stats->numActivePages); | 1567 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1553 EXPECT_EQ(0u, stats->numEmptyPages); | 1568 EXPECT_EQ(1u, stats->num_decommitted_pages); |
| 1554 EXPECT_EQ(1u, stats->numDecommittedPages); | |
| 1555 } | 1569 } |
| 1556 } | 1570 } |
| 1557 | 1571 |
| 1558 // This test checks for correct empty page list accounting. | 1572 // This test checks for correct empty page list accounting. |
| 1559 { | 1573 { |
| 1560 size_t size = kPartitionPageSize - kExtraAllocSize; | 1574 size_t size = kPartitionPageSize - kExtraAllocSize; |
| 1561 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1575 void* ptr1 = |
| 1562 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1576 PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1563 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1577 void* ptr2 = |
| 1564 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1578 PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1579 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1580 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1565 | 1581 |
| 1566 CycleGenericFreeCache(kTestAllocSize); | 1582 CycleGenericFreeCache(kTestAllocSize); |
| 1567 | 1583 |
| 1568 ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1584 ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1569 | 1585 |
| 1570 { | 1586 { |
| 1571 MockPartitionStatsDumper mockStatsDumperGeneric; | 1587 MockPartitionStatsDumper dumper; |
| 1572 partitionDumpStatsGeneric( | 1588 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1573 genericAllocator.root(), "mock_generic_allocator", | 1589 "mock_generic_allocator", |
| 1574 false /* detailed dump */, &mockStatsDumperGeneric); | 1590 false /* detailed dump */, &dumper); |
| 1575 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1591 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1576 | 1592 |
| 1577 const PartitionBucketMemoryStats* stats = | 1593 const PartitionBucketMemoryStats* stats = |
| 1578 mockStatsDumperGeneric.GetBucketStats(kPartitionPageSize); | 1594 dumper.GetBucketStats(kPartitionPageSize); |
| 1579 EXPECT_TRUE(stats); | 1595 EXPECT_TRUE(stats); |
| 1580 EXPECT_TRUE(stats->isValid); | 1596 EXPECT_TRUE(stats->is_valid); |
| 1581 EXPECT_EQ(kPartitionPageSize, stats->bucketSlotSize); | 1597 EXPECT_EQ(kPartitionPageSize, stats->bucket_slot_size); |
| 1582 EXPECT_EQ(kPartitionPageSize, stats->activeBytes); | 1598 EXPECT_EQ(kPartitionPageSize, stats->active_bytes); |
| 1583 EXPECT_EQ(kPartitionPageSize, stats->residentBytes); | 1599 EXPECT_EQ(kPartitionPageSize, stats->resident_bytes); |
| 1584 EXPECT_EQ(0u, stats->decommittableBytes); | 1600 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1585 EXPECT_EQ(0u, stats->discardableBytes); | 1601 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1586 EXPECT_EQ(1u, stats->numFullPages); | 1602 EXPECT_EQ(1u, stats->num_full_pages); |
| 1587 EXPECT_EQ(0u, stats->numActivePages); | 1603 EXPECT_EQ(0u, stats->num_active_pages); |
| 1588 EXPECT_EQ(0u, stats->numEmptyPages); | 1604 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1589 EXPECT_EQ(1u, stats->numDecommittedPages); | 1605 EXPECT_EQ(1u, stats->num_decommitted_pages); |
| 1590 } | 1606 } |
| 1591 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1607 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1592 } | 1608 } |
| 1593 | 1609 |
| 1594 // This test checks for correct direct mapped accounting. | 1610 // This test checks for correct direct mapped accounting. |
| 1595 { | 1611 { |
| 1596 size_t sizeSmaller = kGenericMaxBucketed + 1; | 1612 size_t size_smaller = kGenericMaxBucketed + 1; |
| 1597 size_t sizeBigger = (kGenericMaxBucketed * 2) + 1; | 1613 size_t size_bigger = (kGenericMaxBucketed * 2) + 1; |
| 1598 size_t realSizeSmaller = | 1614 size_t real_size_smaller = |
| 1599 (sizeSmaller + kSystemPageOffsetMask) & kSystemPageBaseMask; | 1615 (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 1600 size_t realSizeBigger = | 1616 size_t real_size_bigger = |
| 1601 (sizeBigger + kSystemPageOffsetMask) & kSystemPageBaseMask; | 1617 (size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 1602 void* ptr = | 1618 void* ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, |
| 1603 partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName); | 1619 type_name); |
| 1604 void* ptr2 = | 1620 void* ptr2 = |
| 1605 partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName); | 1621 PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); |
| 1606 | 1622 |
| 1607 { | 1623 { |
| 1608 MockPartitionStatsDumper mockStatsDumperGeneric; | 1624 MockPartitionStatsDumper dumper; |
| 1609 partitionDumpStatsGeneric( | 1625 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1610 genericAllocator.root(), "mock_generic_allocator", | 1626 "mock_generic_allocator", |
| 1611 false /* detailed dump */, &mockStatsDumperGeneric); | 1627 false /* detailed dump */, &dumper); |
| 1612 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1628 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1613 | 1629 |
| 1614 const PartitionBucketMemoryStats* stats = | 1630 const PartitionBucketMemoryStats* stats = |
| 1615 mockStatsDumperGeneric.GetBucketStats(realSizeSmaller); | 1631 dumper.GetBucketStats(real_size_smaller); |
| 1616 EXPECT_TRUE(stats); | 1632 EXPECT_TRUE(stats); |
| 1617 EXPECT_TRUE(stats->isValid); | 1633 EXPECT_TRUE(stats->is_valid); |
| 1618 EXPECT_TRUE(stats->isDirectMap); | 1634 EXPECT_TRUE(stats->is_direct_map); |
| 1619 EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize); | 1635 EXPECT_EQ(real_size_smaller, stats->bucket_slot_size); |
| 1620 EXPECT_EQ(realSizeSmaller, stats->activeBytes); | 1636 EXPECT_EQ(real_size_smaller, stats->active_bytes); |
| 1621 EXPECT_EQ(realSizeSmaller, stats->residentBytes); | 1637 EXPECT_EQ(real_size_smaller, stats->resident_bytes); |
| 1622 EXPECT_EQ(0u, stats->decommittableBytes); | 1638 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1623 EXPECT_EQ(0u, stats->discardableBytes); | 1639 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1624 EXPECT_EQ(1u, stats->numFullPages); | 1640 EXPECT_EQ(1u, stats->num_full_pages); |
| 1625 EXPECT_EQ(0u, stats->numActivePages); | 1641 EXPECT_EQ(0u, stats->num_active_pages); |
| 1626 EXPECT_EQ(0u, stats->numEmptyPages); | 1642 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1627 EXPECT_EQ(0u, stats->numDecommittedPages); | 1643 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1628 | 1644 |
| 1629 stats = mockStatsDumperGeneric.GetBucketStats(realSizeBigger); | 1645 stats = dumper.GetBucketStats(real_size_bigger); |
| 1630 EXPECT_TRUE(stats); | 1646 EXPECT_TRUE(stats); |
| 1631 EXPECT_TRUE(stats->isValid); | 1647 EXPECT_TRUE(stats->is_valid); |
| 1632 EXPECT_TRUE(stats->isDirectMap); | 1648 EXPECT_TRUE(stats->is_direct_map); |
| 1633 EXPECT_EQ(realSizeBigger, stats->bucketSlotSize); | 1649 EXPECT_EQ(real_size_bigger, stats->bucket_slot_size); |
| 1634 EXPECT_EQ(realSizeBigger, stats->activeBytes); | 1650 EXPECT_EQ(real_size_bigger, stats->active_bytes); |
| 1635 EXPECT_EQ(realSizeBigger, stats->residentBytes); | 1651 EXPECT_EQ(real_size_bigger, stats->resident_bytes); |
| 1636 EXPECT_EQ(0u, stats->decommittableBytes); | 1652 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1637 EXPECT_EQ(0u, stats->discardableBytes); | 1653 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1638 EXPECT_EQ(1u, stats->numFullPages); | 1654 EXPECT_EQ(1u, stats->num_full_pages); |
| 1639 EXPECT_EQ(0u, stats->numActivePages); | 1655 EXPECT_EQ(0u, stats->num_active_pages); |
| 1640 EXPECT_EQ(0u, stats->numEmptyPages); | 1656 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1641 EXPECT_EQ(0u, stats->numDecommittedPages); | 1657 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1642 } | 1658 } |
| 1643 | 1659 |
| 1644 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1660 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1645 partitionFreeGeneric(genericAllocator.root(), ptr); | 1661 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1646 | 1662 |
| 1647 // Whilst we're here, allocate again and free with different ordering | 1663 // Whilst we're here, allocate again and free with different ordering |
| 1648 // to give a workout to our linked list code. | 1664 // to give a workout to our linked list code. |
| 1649 ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName); | 1665 ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, |
| 1650 ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName); | 1666 type_name); |
| 1651 partitionFreeGeneric(genericAllocator.root(), ptr); | 1667 ptr2 = |
| 1652 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1668 PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); |
| 1669 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1670 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1653 } | 1671 } |
| 1654 | 1672 |
| 1655 // This test checks large-but-not-quite-direct allocations. | 1673 // This test checks large-but-not-quite-direct allocations. |
| 1656 { | 1674 { |
| 1657 void* ptr = | 1675 void* ptr = |
| 1658 partitionAllocGeneric(genericAllocator.root(), 65536 + 1, typeName); | 1676 PartitionAllocGeneric(generic_allocator.root(), 65536 + 1, type_name); |
| 1659 | 1677 |
| 1660 { | 1678 { |
| 1661 MockPartitionStatsDumper mockStatsDumperGeneric; | 1679 MockPartitionStatsDumper dumper; |
| 1662 partitionDumpStatsGeneric( | 1680 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1663 genericAllocator.root(), "mock_generic_allocator", | 1681 "mock_generic_allocator", |
| 1664 false /* detailed dump */, &mockStatsDumperGeneric); | 1682 false /* detailed dump */, &dumper); |
| 1665 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1683 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1666 | 1684 |
| 1667 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); | 1685 size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); |
| 1668 const PartitionBucketMemoryStats* stats = | 1686 const PartitionBucketMemoryStats* stats = |
| 1669 mockStatsDumperGeneric.GetBucketStats(slotSize); | 1687 dumper.GetBucketStats(slot_size); |
| 1670 EXPECT_TRUE(stats); | 1688 EXPECT_TRUE(stats); |
| 1671 EXPECT_TRUE(stats->isValid); | 1689 EXPECT_TRUE(stats->is_valid); |
| 1672 EXPECT_FALSE(stats->isDirectMap); | 1690 EXPECT_FALSE(stats->is_direct_map); |
| 1673 EXPECT_EQ(slotSize, stats->bucketSlotSize); | 1691 EXPECT_EQ(slot_size, stats->bucket_slot_size); |
| 1674 EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->activeBytes); | 1692 EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->active_bytes); |
| 1675 EXPECT_EQ(slotSize, stats->residentBytes); | 1693 EXPECT_EQ(slot_size, stats->resident_bytes); |
| 1676 EXPECT_EQ(0u, stats->decommittableBytes); | 1694 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1677 EXPECT_EQ(kSystemPageSize, stats->discardableBytes); | 1695 EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); |
| 1678 EXPECT_EQ(1u, stats->numFullPages); | 1696 EXPECT_EQ(1u, stats->num_full_pages); |
| 1679 EXPECT_EQ(0u, stats->numActivePages); | 1697 EXPECT_EQ(0u, stats->num_active_pages); |
| 1680 EXPECT_EQ(0u, stats->numEmptyPages); | 1698 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1681 EXPECT_EQ(0u, stats->numDecommittedPages); | 1699 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1682 } | 1700 } |
| 1683 | 1701 |
| 1684 partitionFreeGeneric(genericAllocator.root(), ptr); | 1702 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1685 | 1703 |
| 1686 { | 1704 { |
| 1687 MockPartitionStatsDumper mockStatsDumperGeneric; | 1705 MockPartitionStatsDumper dumper; |
| 1688 partitionDumpStatsGeneric( | 1706 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1689 genericAllocator.root(), "mock_generic_allocator", | 1707 "mock_generic_allocator", |
| 1690 false /* detailed dump */, &mockStatsDumperGeneric); | 1708 false /* detailed dump */, &dumper); |
| 1691 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1709 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1692 | 1710 |
| 1693 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); | 1711 size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); |
| 1694 const PartitionBucketMemoryStats* stats = | 1712 const PartitionBucketMemoryStats* stats = |
| 1695 mockStatsDumperGeneric.GetBucketStats(slotSize); | 1713 dumper.GetBucketStats(slot_size); |
| 1696 EXPECT_TRUE(stats); | 1714 EXPECT_TRUE(stats); |
| 1697 EXPECT_TRUE(stats->isValid); | 1715 EXPECT_TRUE(stats->is_valid); |
| 1698 EXPECT_FALSE(stats->isDirectMap); | 1716 EXPECT_FALSE(stats->is_direct_map); |
| 1699 EXPECT_EQ(slotSize, stats->bucketSlotSize); | 1717 EXPECT_EQ(slot_size, stats->bucket_slot_size); |
| 1700 EXPECT_EQ(0u, stats->activeBytes); | 1718 EXPECT_EQ(0u, stats->active_bytes); |
| 1701 EXPECT_EQ(slotSize, stats->residentBytes); | 1719 EXPECT_EQ(slot_size, stats->resident_bytes); |
| 1702 EXPECT_EQ(slotSize, stats->decommittableBytes); | 1720 EXPECT_EQ(slot_size, stats->decommittable_bytes); |
| 1703 EXPECT_EQ(0u, stats->numFullPages); | 1721 EXPECT_EQ(0u, stats->num_full_pages); |
| 1704 EXPECT_EQ(0u, stats->numActivePages); | 1722 EXPECT_EQ(0u, stats->num_active_pages); |
| 1705 EXPECT_EQ(1u, stats->numEmptyPages); | 1723 EXPECT_EQ(1u, stats->num_empty_pages); |
| 1706 EXPECT_EQ(0u, stats->numDecommittedPages); | 1724 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1707 } | 1725 } |
| 1708 | 1726 |
| 1709 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), | 1727 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), |
| 1710 65536 + kSystemPageSize + 1, typeName); | 1728 65536 + kSystemPageSize + 1, type_name); |
| 1711 EXPECT_EQ(ptr, ptr2); | 1729 EXPECT_EQ(ptr, ptr2); |
| 1712 | 1730 |
| 1713 { | 1731 { |
| 1714 MockPartitionStatsDumper mockStatsDumperGeneric; | 1732 MockPartitionStatsDumper dumper; |
| 1715 partitionDumpStatsGeneric( | 1733 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1716 genericAllocator.root(), "mock_generic_allocator", | 1734 "mock_generic_allocator", |
| 1717 false /* detailed dump */, &mockStatsDumperGeneric); | 1735 false /* detailed dump */, &dumper); |
| 1718 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1736 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1719 | 1737 |
| 1720 size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); | 1738 size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); |
| 1721 const PartitionBucketMemoryStats* stats = | 1739 const PartitionBucketMemoryStats* stats = |
| 1722 mockStatsDumperGeneric.GetBucketStats(slotSize); | 1740 dumper.GetBucketStats(slot_size); |
| 1723 EXPECT_TRUE(stats); | 1741 EXPECT_TRUE(stats); |
| 1724 EXPECT_TRUE(stats->isValid); | 1742 EXPECT_TRUE(stats->is_valid); |
| 1725 EXPECT_FALSE(stats->isDirectMap); | 1743 EXPECT_FALSE(stats->is_direct_map); |
| 1726 EXPECT_EQ(slotSize, stats->bucketSlotSize); | 1744 EXPECT_EQ(slot_size, stats->bucket_slot_size); |
| 1727 EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize, | 1745 EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize, |
| 1728 stats->activeBytes); | 1746 stats->active_bytes); |
| 1729 EXPECT_EQ(slotSize, stats->residentBytes); | 1747 EXPECT_EQ(slot_size, stats->resident_bytes); |
| 1730 EXPECT_EQ(0u, stats->decommittableBytes); | 1748 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1731 EXPECT_EQ(0u, stats->discardableBytes); | 1749 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1732 EXPECT_EQ(1u, stats->numFullPages); | 1750 EXPECT_EQ(1u, stats->num_full_pages); |
| 1733 EXPECT_EQ(0u, stats->numActivePages); | 1751 EXPECT_EQ(0u, stats->num_active_pages); |
| 1734 EXPECT_EQ(0u, stats->numEmptyPages); | 1752 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1735 EXPECT_EQ(0u, stats->numDecommittedPages); | 1753 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1736 } | 1754 } |
| 1737 | 1755 |
| 1738 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1756 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1739 } | 1757 } |
| 1740 | 1758 |
| 1741 TestShutdown(); | 1759 TestShutdown(); |
| 1742 } | 1760 } |
| 1743 | 1761 |
| 1744 // Tests the API to purge freeable memory. | 1762 // Tests the API to purge freeable memory. |
| 1745 TEST(PartitionAllocTest, Purge) { | 1763 TEST(PartitionAllocTest, Purge) { |
| 1746 TestSetup(); | 1764 TestSetup(); |
| 1747 | 1765 |
| 1748 char* ptr = reinterpret_cast<char*>(partitionAllocGeneric( | 1766 char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric( |
| 1749 genericAllocator.root(), 2048 - kExtraAllocSize, typeName)); | 1767 generic_allocator.root(), 2048 - kExtraAllocSize, type_name)); |
| 1750 partitionFreeGeneric(genericAllocator.root(), ptr); | 1768 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1751 { | 1769 { |
| 1752 MockPartitionStatsDumper mockStatsDumperGeneric; | 1770 MockPartitionStatsDumper dumper; |
| 1753 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", | 1771 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1754 false /* detailed dump */, | 1772 "mock_generic_allocator", |
| 1755 &mockStatsDumperGeneric); | 1773 false /* detailed dump */, &dumper); |
| 1756 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1774 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1757 | 1775 |
| 1758 const PartitionBucketMemoryStats* stats = | 1776 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1759 mockStatsDumperGeneric.GetBucketStats(2048); | |
| 1760 EXPECT_TRUE(stats); | 1777 EXPECT_TRUE(stats); |
| 1761 EXPECT_TRUE(stats->isValid); | 1778 EXPECT_TRUE(stats->is_valid); |
| 1762 EXPECT_EQ(kSystemPageSize, stats->decommittableBytes); | 1779 EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); |
| 1763 EXPECT_EQ(kSystemPageSize, stats->residentBytes); | 1780 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); |
| 1764 } | 1781 } |
| 1765 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1782 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1766 PartitionPurgeDecommitEmptyPages); | 1783 PartitionPurgeDecommitEmptyPages); |
| 1767 { | 1784 { |
| 1768 MockPartitionStatsDumper mockStatsDumperGeneric; | 1785 MockPartitionStatsDumper dumper; |
| 1769 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", | 1786 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1770 false /* detailed dump */, | 1787 "mock_generic_allocator", |
| 1771 &mockStatsDumperGeneric); | 1788 false /* detailed dump */, &dumper); |
| 1772 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1789 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1773 | 1790 |
| 1774 const PartitionBucketMemoryStats* stats = | 1791 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1775 mockStatsDumperGeneric.GetBucketStats(2048); | |
| 1776 EXPECT_TRUE(stats); | 1792 EXPECT_TRUE(stats); |
| 1777 EXPECT_TRUE(stats->isValid); | 1793 EXPECT_TRUE(stats->is_valid); |
| 1778 EXPECT_EQ(0u, stats->decommittableBytes); | 1794 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1779 EXPECT_EQ(0u, stats->residentBytes); | 1795 EXPECT_EQ(0u, stats->resident_bytes); |
| 1780 } | 1796 } |
| 1781 // Calling purge again here is a good way of testing we didn't mess up the | 1797 // Calling purge again here is a good way of testing we didn't mess up the |
| 1782 // state of the free cache ring. | 1798 // state of the free cache ring. |
| 1783 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1799 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1784 PartitionPurgeDecommitEmptyPages); | 1800 PartitionPurgeDecommitEmptyPages); |
| 1785 | 1801 |
| 1786 char* bigPtr = reinterpret_cast<char*>( | 1802 char* bigPtr = reinterpret_cast<char*>( |
| 1787 partitionAllocGeneric(genericAllocator.root(), 256 * 1024, typeName)); | 1803 PartitionAllocGeneric(generic_allocator.root(), 256 * 1024, type_name)); |
| 1788 partitionFreeGeneric(genericAllocator.root(), bigPtr); | 1804 PartitionFreeGeneric(generic_allocator.root(), bigPtr); |
| 1789 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1805 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1790 PartitionPurgeDecommitEmptyPages); | 1806 PartitionPurgeDecommitEmptyPages); |
| 1791 | 1807 |
| 1792 CheckPageInCore(ptr - kPointerOffset, false); | 1808 CheckPageInCore(ptr - kPointerOffset, false); |
| 1793 CheckPageInCore(bigPtr - kPointerOffset, false); | 1809 CheckPageInCore(bigPtr - kPointerOffset, false); |
| 1794 | 1810 |
| 1795 TestShutdown(); | 1811 TestShutdown(); |
| 1796 } | 1812 } |
| 1797 | 1813 |
| 1798 // Tests that we prefer to allocate into a non-empty partition page over an | 1814 // Tests that we prefer to allocate into a non-empty partition page over an |
| 1799 // empty one. This is an important aspect of minimizing memory usage for some | 1815 // empty one. This is an important aspect of minimizing memory usage for some |
| 1800 // allocation sizes, particularly larger ones. | 1816 // allocation sizes, particularly larger ones. |
| 1801 TEST(PartitionAllocTest, PreferActiveOverEmpty) { | 1817 TEST(PartitionAllocTest, PreferActiveOverEmpty) { |
| 1802 TestSetup(); | 1818 TestSetup(); |
| 1803 | 1819 |
| 1804 size_t size = (kSystemPageSize * 2) - kExtraAllocSize; | 1820 size_t size = (kSystemPageSize * 2) - kExtraAllocSize; |
| 1805 // Allocate 3 full slot spans worth of 8192-byte allocations. | 1821 // Allocate 3 full slot spans worth of 8192-byte allocations. |
| 1806 // Each slot span for this size is 16384 bytes, or 1 partition page and 2 | 1822 // Each slot span for this size is 16384 bytes, or 1 partition page and 2 |
| 1807 // slots. | 1823 // slots. |
| 1808 void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1824 void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1809 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1825 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1810 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1826 void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1811 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1827 void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1812 void* ptr5 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1828 void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1813 void* ptr6 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1829 void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1814 | 1830 |
| 1815 PartitionPage* page1 = | 1831 PartitionPage* page1 = |
| 1816 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); | 1832 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); |
| 1817 PartitionPage* page2 = | 1833 PartitionPage* page2 = |
| 1818 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3)); | 1834 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3)); |
| 1819 PartitionPage* page3 = | 1835 PartitionPage* page3 = |
| 1820 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr6)); | 1836 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr6)); |
| 1821 EXPECT_NE(page1, page2); | 1837 EXPECT_NE(page1, page2); |
| 1822 EXPECT_NE(page2, page3); | 1838 EXPECT_NE(page2, page3); |
| 1823 PartitionBucket* bucket = page1->bucket; | 1839 PartitionBucket* bucket = page1->bucket; |
| 1824 EXPECT_EQ(page3, bucket->activePagesHead); | 1840 EXPECT_EQ(page3, bucket->active_pages_head); |
| 1825 | 1841 |
| 1826 // Free up the 2nd slot in each slot span. | 1842 // Free up the 2nd slot in each slot span. |
| 1827 // This leaves the active list containing 3 pages, each with 1 used and 1 | 1843 // This leaves the active list containing 3 pages, each with 1 used and 1 |
| 1828 // free slot. The active page will be the one containing ptr1. | 1844 // free slot. The active page will be the one containing ptr1. |
| 1829 partitionFreeGeneric(genericAllocator.root(), ptr6); | 1845 PartitionFreeGeneric(generic_allocator.root(), ptr6); |
| 1830 partitionFreeGeneric(genericAllocator.root(), ptr4); | 1846 PartitionFreeGeneric(generic_allocator.root(), ptr4); |
| 1831 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1847 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1832 EXPECT_EQ(page1, bucket->activePagesHead); | 1848 EXPECT_EQ(page1, bucket->active_pages_head); |
| 1833 | 1849 |
| 1834 // Empty the middle page in the active list. | 1850 // Empty the middle page in the active list. |
| 1835 partitionFreeGeneric(genericAllocator.root(), ptr3); | 1851 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 1836 EXPECT_EQ(page1, bucket->activePagesHead); | 1852 EXPECT_EQ(page1, bucket->active_pages_head); |
| 1837 | 1853 |
| 1838 // Empty the the first page in the active list -- also the current page. | 1854 // Empty the the first page in the active list -- also the current page. |
| 1839 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1855 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1840 | 1856 |
| 1841 // A good choice here is to re-fill the third page since the first two are | 1857 // A good choice here is to re-fill the third page since the first two are |
| 1842 // empty. We used to fail that. | 1858 // empty. We used to fail that. |
| 1843 void* ptr7 = partitionAllocGeneric(genericAllocator.root(), size, typeName); | 1859 void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1844 EXPECT_EQ(ptr6, ptr7); | 1860 EXPECT_EQ(ptr6, ptr7); |
| 1845 EXPECT_EQ(page3, bucket->activePagesHead); | 1861 EXPECT_EQ(page3, bucket->active_pages_head); |
| 1846 | 1862 |
| 1847 partitionFreeGeneric(genericAllocator.root(), ptr5); | 1863 PartitionFreeGeneric(generic_allocator.root(), ptr5); |
| 1848 partitionFreeGeneric(genericAllocator.root(), ptr7); | 1864 PartitionFreeGeneric(generic_allocator.root(), ptr7); |
| 1849 | 1865 |
| 1850 TestShutdown(); | 1866 TestShutdown(); |
| 1851 } | 1867 } |
| 1852 | 1868 |
| 1853 // Tests the API to purge discardable memory. | 1869 // Tests the API to purge discardable memory. |
| 1854 TEST(PartitionAllocTest, PurgeDiscardable) { | 1870 TEST(PartitionAllocTest, PurgeDiscardable) { |
| 1855 TestSetup(); | 1871 TestSetup(); |
| 1856 | 1872 |
| 1857 // Free the second of two 4096 byte allocations and then purge. | 1873 // Free the second of two 4096 byte allocations and then purge. |
| 1858 { | 1874 { |
| 1859 void* ptr1 = partitionAllocGeneric( | 1875 void* ptr1 = PartitionAllocGeneric( |
| 1860 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 1876 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 1861 char* ptr2 = reinterpret_cast<char*>(partitionAllocGeneric( | 1877 char* ptr2 = reinterpret_cast<char*>( |
| 1862 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); | 1878 PartitionAllocGeneric(generic_allocator.root(), |
| 1863 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1879 kSystemPageSize - kExtraAllocSize, type_name)); |
| 1880 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1864 PartitionPage* page = | 1881 PartitionPage* page = |
| 1865 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); | 1882 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); |
| 1866 EXPECT_EQ(2u, page->numUnprovisionedSlots); | 1883 EXPECT_EQ(2u, page->num_unprovisioned_slots); |
| 1867 { | 1884 { |
| 1868 MockPartitionStatsDumper mockStatsDumperGeneric; | 1885 MockPartitionStatsDumper dumper; |
| 1869 partitionDumpStatsGeneric( | 1886 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1870 genericAllocator.root(), "mock_generic_allocator", | 1887 "mock_generic_allocator", |
| 1871 false /* detailed dump */, &mockStatsDumperGeneric); | 1888 false /* detailed dump */, &dumper); |
| 1872 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1889 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1873 | 1890 |
| 1874 const PartitionBucketMemoryStats* stats = | 1891 const PartitionBucketMemoryStats* stats = |
| 1875 mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); | 1892 dumper.GetBucketStats(kSystemPageSize); |
| 1876 EXPECT_TRUE(stats); | 1893 EXPECT_TRUE(stats); |
| 1877 EXPECT_TRUE(stats->isValid); | 1894 EXPECT_TRUE(stats->is_valid); |
| 1878 EXPECT_EQ(0u, stats->decommittableBytes); | 1895 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1879 EXPECT_EQ(kSystemPageSize, stats->discardableBytes); | 1896 EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); |
| 1880 EXPECT_EQ(kSystemPageSize, stats->activeBytes); | 1897 EXPECT_EQ(kSystemPageSize, stats->active_bytes); |
| 1881 EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes); | 1898 EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); |
| 1882 } | 1899 } |
| 1883 CheckPageInCore(ptr2 - kPointerOffset, true); | 1900 CheckPageInCore(ptr2 - kPointerOffset, true); |
| 1884 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1901 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1885 PartitionPurgeDiscardUnusedSystemPages); | 1902 PartitionPurgeDiscardUnusedSystemPages); |
| 1886 CheckPageInCore(ptr2 - kPointerOffset, false); | 1903 CheckPageInCore(ptr2 - kPointerOffset, false); |
| 1887 EXPECT_EQ(3u, page->numUnprovisionedSlots); | 1904 EXPECT_EQ(3u, page->num_unprovisioned_slots); |
| 1888 | 1905 |
| 1889 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1906 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1890 } | 1907 } |
| 1891 // Free the first of two 4096 byte allocations and then purge. | 1908 // Free the first of two 4096 byte allocations and then purge. |
| 1892 { | 1909 { |
| 1893 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 1910 char* ptr1 = reinterpret_cast<char*>( |
| 1894 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); | 1911 PartitionAllocGeneric(generic_allocator.root(), |
| 1895 void* ptr2 = partitionAllocGeneric( | 1912 kSystemPageSize - kExtraAllocSize, type_name)); |
| 1896 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 1913 void* ptr2 = PartitionAllocGeneric( |
| 1897 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1914 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 1915 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1898 { | 1916 { |
| 1899 MockPartitionStatsDumper mockStatsDumperGeneric; | 1917 MockPartitionStatsDumper dumper; |
| 1900 partitionDumpStatsGeneric( | 1918 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1901 genericAllocator.root(), "mock_generic_allocator", | 1919 "mock_generic_allocator", |
| 1902 false /* detailed dump */, &mockStatsDumperGeneric); | 1920 false /* detailed dump */, &dumper); |
| 1903 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1921 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1904 | 1922 |
| 1905 const PartitionBucketMemoryStats* stats = | 1923 const PartitionBucketMemoryStats* stats = |
| 1906 mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); | 1924 dumper.GetBucketStats(kSystemPageSize); |
| 1907 EXPECT_TRUE(stats); | 1925 EXPECT_TRUE(stats); |
| 1908 EXPECT_TRUE(stats->isValid); | 1926 EXPECT_TRUE(stats->is_valid); |
| 1909 EXPECT_EQ(0u, stats->decommittableBytes); | 1927 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1910 EXPECT_EQ(kSystemPageSize, stats->discardableBytes); | 1928 EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); |
| 1911 EXPECT_EQ(kSystemPageSize, stats->activeBytes); | 1929 EXPECT_EQ(kSystemPageSize, stats->active_bytes); |
| 1912 EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes); | 1930 EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); |
| 1913 } | 1931 } |
| 1914 CheckPageInCore(ptr1 - kPointerOffset, true); | 1932 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 1915 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1933 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1916 PartitionPurgeDiscardUnusedSystemPages); | 1934 PartitionPurgeDiscardUnusedSystemPages); |
| 1917 CheckPageInCore(ptr1 - kPointerOffset, false); | 1935 CheckPageInCore(ptr1 - kPointerOffset, false); |
| 1918 | 1936 |
| 1919 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1937 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1920 } | 1938 } |
| 1921 { | 1939 { |
| 1922 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 1940 char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( |
| 1923 genericAllocator.root(), 9216 - kExtraAllocSize, typeName)); | 1941 generic_allocator.root(), 9216 - kExtraAllocSize, type_name)); |
| 1924 void* ptr2 = partitionAllocGeneric(genericAllocator.root(), | 1942 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), |
| 1925 9216 - kExtraAllocSize, typeName); | 1943 9216 - kExtraAllocSize, type_name); |
| 1926 void* ptr3 = partitionAllocGeneric(genericAllocator.root(), | 1944 void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), |
| 1927 9216 - kExtraAllocSize, typeName); | 1945 9216 - kExtraAllocSize, type_name); |
| 1928 void* ptr4 = partitionAllocGeneric(genericAllocator.root(), | 1946 void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), |
| 1929 9216 - kExtraAllocSize, typeName); | 1947 9216 - kExtraAllocSize, type_name); |
| 1930 memset(ptr1, 'A', 9216 - kExtraAllocSize); | 1948 memset(ptr1, 'A', 9216 - kExtraAllocSize); |
| 1931 memset(ptr2, 'A', 9216 - kExtraAllocSize); | 1949 memset(ptr2, 'A', 9216 - kExtraAllocSize); |
| 1932 partitionFreeGeneric(genericAllocator.root(), ptr2); | 1950 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1933 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1951 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1934 { | 1952 { |
| 1935 MockPartitionStatsDumper mockStatsDumperGeneric; | 1953 MockPartitionStatsDumper dumper; |
| 1936 partitionDumpStatsGeneric( | 1954 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1937 genericAllocator.root(), "mock_generic_allocator", | 1955 "mock_generic_allocator", |
| 1938 false /* detailed dump */, &mockStatsDumperGeneric); | 1956 false /* detailed dump */, &dumper); |
| 1939 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1957 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1940 | 1958 |
| 1941 const PartitionBucketMemoryStats* stats = | 1959 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216); |
| 1942 mockStatsDumperGeneric.GetBucketStats(9216); | |
| 1943 EXPECT_TRUE(stats); | 1960 EXPECT_TRUE(stats); |
| 1944 EXPECT_TRUE(stats->isValid); | 1961 EXPECT_TRUE(stats->is_valid); |
| 1945 EXPECT_EQ(0u, stats->decommittableBytes); | 1962 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1946 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); | 1963 EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); |
| 1947 EXPECT_EQ(9216u * 2, stats->activeBytes); | 1964 EXPECT_EQ(9216u * 2, stats->active_bytes); |
| 1948 EXPECT_EQ(9 * kSystemPageSize, stats->residentBytes); | 1965 EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes); |
| 1949 } | 1966 } |
| 1950 CheckPageInCore(ptr1 - kPointerOffset, true); | 1967 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 1951 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); | 1968 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); |
| 1952 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); | 1969 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); |
| 1953 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); | 1970 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); |
| 1954 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); | 1971 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); |
| 1955 partitionPurgeMemoryGeneric(genericAllocator.root(), | 1972 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1956 PartitionPurgeDiscardUnusedSystemPages); | 1973 PartitionPurgeDiscardUnusedSystemPages); |
| 1957 CheckPageInCore(ptr1 - kPointerOffset, true); | 1974 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 1958 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); | 1975 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); |
| 1959 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); | 1976 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); |
| 1960 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); | 1977 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); |
| 1961 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); | 1978 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); |
| 1962 | 1979 |
| 1963 partitionFreeGeneric(genericAllocator.root(), ptr3); | 1980 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 1964 partitionFreeGeneric(genericAllocator.root(), ptr4); | 1981 PartitionFreeGeneric(generic_allocator.root(), ptr4); |
| 1965 } | 1982 } |
| 1966 { | 1983 { |
| 1967 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 1984 char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( |
| 1968 genericAllocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, | 1985 generic_allocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, |
| 1969 typeName)); | 1986 type_name)); |
| 1970 memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize); | 1987 memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize); |
| 1971 partitionFreeGeneric(genericAllocator.root(), ptr1); | 1988 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 1972 ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 1989 ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( |
| 1973 genericAllocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, | 1990 generic_allocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, |
| 1974 typeName)); | 1991 type_name)); |
| 1975 { | 1992 { |
| 1976 MockPartitionStatsDumper mockStatsDumperGeneric; | 1993 MockPartitionStatsDumper dumper; |
| 1977 partitionDumpStatsGeneric( | 1994 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1978 genericAllocator.root(), "mock_generic_allocator", | 1995 "mock_generic_allocator", |
| 1979 false /* detailed dump */, &mockStatsDumperGeneric); | 1996 false /* detailed dump */, &dumper); |
| 1980 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 1997 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1981 | 1998 |
| 1982 const PartitionBucketMemoryStats* stats = | 1999 const PartitionBucketMemoryStats* stats = |
| 1983 mockStatsDumperGeneric.GetBucketStats(64 * kSystemPageSize); | 2000 dumper.GetBucketStats(64 * kSystemPageSize); |
| 1984 EXPECT_TRUE(stats); | 2001 EXPECT_TRUE(stats); |
| 1985 EXPECT_TRUE(stats->isValid); | 2002 EXPECT_TRUE(stats->is_valid); |
| 1986 EXPECT_EQ(0u, stats->decommittableBytes); | 2003 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1987 EXPECT_EQ(3 * kSystemPageSize, stats->discardableBytes); | 2004 EXPECT_EQ(3 * kSystemPageSize, stats->discardable_bytes); |
| 1988 EXPECT_EQ(61 * kSystemPageSize, stats->activeBytes); | 2005 EXPECT_EQ(61 * kSystemPageSize, stats->active_bytes); |
| 1989 EXPECT_EQ(64 * kSystemPageSize, stats->residentBytes); | 2006 EXPECT_EQ(64 * kSystemPageSize, stats->resident_bytes); |
| 1990 } | 2007 } |
| 1991 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); | 2008 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); |
| 1992 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true); | 2009 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true); |
| 1993 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true); | 2010 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true); |
| 1994 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true); | 2011 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true); |
| 1995 partitionPurgeMemoryGeneric(genericAllocator.root(), | 2012 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1996 PartitionPurgeDiscardUnusedSystemPages); | 2013 PartitionPurgeDiscardUnusedSystemPages); |
| 1997 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); | 2014 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); |
| 1998 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false); | 2015 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false); |
| 1999 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false); | 2016 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false); |
| 2000 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false); | 2017 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false); |
| 2001 | 2018 |
| 2002 partitionFreeGeneric(genericAllocator.root(), ptr1); | 2019 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 2003 } | 2020 } |
| 2004 // This sub-test tests truncation of the provisioned slots in a trickier | 2021 // This sub-test tests truncation of the provisioned slots in a trickier |
| 2005 // case where the freelist is rewritten. | 2022 // case where the freelist is rewritten. |
| 2006 partitionPurgeMemoryGeneric(genericAllocator.root(), | 2023 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 2007 PartitionPurgeDecommitEmptyPages); | 2024 PartitionPurgeDecommitEmptyPages); |
| 2008 { | 2025 { |
| 2009 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 2026 char* ptr1 = reinterpret_cast<char*>( |
| 2010 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); | 2027 PartitionAllocGeneric(generic_allocator.root(), |
| 2011 void* ptr2 = partitionAllocGeneric( | 2028 kSystemPageSize - kExtraAllocSize, type_name)); |
| 2012 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2029 void* ptr2 = PartitionAllocGeneric( |
| 2013 void* ptr3 = partitionAllocGeneric( | 2030 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2014 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2031 void* ptr3 = PartitionAllocGeneric( |
| 2015 void* ptr4 = partitionAllocGeneric( | 2032 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2016 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2033 void* ptr4 = PartitionAllocGeneric( |
| 2034 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2017 ptr1[0] = 'A'; | 2035 ptr1[0] = 'A'; |
| 2018 ptr1[kSystemPageSize] = 'A'; | 2036 ptr1[kSystemPageSize] = 'A'; |
| 2019 ptr1[kSystemPageSize * 2] = 'A'; | 2037 ptr1[kSystemPageSize * 2] = 'A'; |
| 2020 ptr1[kSystemPageSize * 3] = 'A'; | 2038 ptr1[kSystemPageSize * 3] = 'A'; |
| 2021 PartitionPage* page = | 2039 PartitionPage* page = |
| 2022 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); | 2040 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); |
| 2023 partitionFreeGeneric(genericAllocator.root(), ptr2); | 2041 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 2024 partitionFreeGeneric(genericAllocator.root(), ptr4); | 2042 PartitionFreeGeneric(generic_allocator.root(), ptr4); |
| 2025 partitionFreeGeneric(genericAllocator.root(), ptr1); | 2043 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 2026 EXPECT_EQ(0u, page->numUnprovisionedSlots); | 2044 EXPECT_EQ(0u, page->num_unprovisioned_slots); |
| 2027 | 2045 |
| 2028 { | 2046 { |
| 2029 MockPartitionStatsDumper mockStatsDumperGeneric; | 2047 MockPartitionStatsDumper dumper; |
| 2030 partitionDumpStatsGeneric( | 2048 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 2031 genericAllocator.root(), "mock_generic_allocator", | 2049 "mock_generic_allocator", |
| 2032 false /* detailed dump */, &mockStatsDumperGeneric); | 2050 false /* detailed dump */, &dumper); |
| 2033 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 2051 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 2034 | 2052 |
| 2035 const PartitionBucketMemoryStats* stats = | 2053 const PartitionBucketMemoryStats* stats = |
| 2036 mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); | 2054 dumper.GetBucketStats(kSystemPageSize); |
| 2037 EXPECT_TRUE(stats); | 2055 EXPECT_TRUE(stats); |
| 2038 EXPECT_TRUE(stats->isValid); | 2056 EXPECT_TRUE(stats->is_valid); |
| 2039 EXPECT_EQ(0u, stats->decommittableBytes); | 2057 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 2040 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); | 2058 EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); |
| 2041 EXPECT_EQ(kSystemPageSize, stats->activeBytes); | 2059 EXPECT_EQ(kSystemPageSize, stats->active_bytes); |
| 2042 EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes); | 2060 EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes); |
| 2043 } | 2061 } |
| 2044 CheckPageInCore(ptr1 - kPointerOffset, true); | 2062 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 2045 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); | 2063 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); |
| 2046 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); | 2064 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); |
| 2047 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); | 2065 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); |
| 2048 partitionPurgeMemoryGeneric(genericAllocator.root(), | 2066 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 2049 PartitionPurgeDiscardUnusedSystemPages); | 2067 PartitionPurgeDiscardUnusedSystemPages); |
| 2050 EXPECT_EQ(1u, page->numUnprovisionedSlots); | 2068 EXPECT_EQ(1u, page->num_unprovisioned_slots); |
| 2051 CheckPageInCore(ptr1 - kPointerOffset, true); | 2069 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 2052 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); | 2070 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); |
| 2053 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); | 2071 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); |
| 2054 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); | 2072 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); |
| 2055 | 2073 |
| 2056 // Let's check we didn't brick the freelist. | 2074 // Let's check we didn't brick the freelist. |
| 2057 void* ptr1b = partitionAllocGeneric( | 2075 void* ptr1b = PartitionAllocGeneric( |
| 2058 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2076 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2059 EXPECT_EQ(ptr1, ptr1b); | 2077 EXPECT_EQ(ptr1, ptr1b); |
| 2060 void* ptr2b = partitionAllocGeneric( | 2078 void* ptr2b = PartitionAllocGeneric( |
| 2061 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2079 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2062 EXPECT_EQ(ptr2, ptr2b); | 2080 EXPECT_EQ(ptr2, ptr2b); |
| 2063 EXPECT_FALSE(page->freelistHead); | 2081 EXPECT_FALSE(page->freelist_head); |
| 2064 | 2082 |
| 2065 partitionFreeGeneric(genericAllocator.root(), ptr1); | 2083 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 2066 partitionFreeGeneric(genericAllocator.root(), ptr2); | 2084 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 2067 partitionFreeGeneric(genericAllocator.root(), ptr3); | 2085 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 2068 } | 2086 } |
| 2069 // This sub-test is similar, but tests a double-truncation. | 2087 // This sub-test is similar, but tests a double-truncation. |
| 2070 partitionPurgeMemoryGeneric(genericAllocator.root(), | 2088 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 2071 PartitionPurgeDecommitEmptyPages); | 2089 PartitionPurgeDecommitEmptyPages); |
| 2072 { | 2090 { |
| 2073 char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( | 2091 char* ptr1 = reinterpret_cast<char*>( |
| 2074 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); | 2092 PartitionAllocGeneric(generic_allocator.root(), |
| 2075 void* ptr2 = partitionAllocGeneric( | 2093 kSystemPageSize - kExtraAllocSize, type_name)); |
| 2076 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2094 void* ptr2 = PartitionAllocGeneric( |
| 2077 void* ptr3 = partitionAllocGeneric( | 2095 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2078 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2096 void* ptr3 = PartitionAllocGeneric( |
| 2079 void* ptr4 = partitionAllocGeneric( | 2097 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2080 genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); | 2098 void* ptr4 = PartitionAllocGeneric( |
| 2099 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 2081 ptr1[0] = 'A'; | 2100 ptr1[0] = 'A'; |
| 2082 ptr1[kSystemPageSize] = 'A'; | 2101 ptr1[kSystemPageSize] = 'A'; |
| 2083 ptr1[kSystemPageSize * 2] = 'A'; | 2102 ptr1[kSystemPageSize * 2] = 'A'; |
| 2084 ptr1[kSystemPageSize * 3] = 'A'; | 2103 ptr1[kSystemPageSize * 3] = 'A'; |
| 2085 PartitionPage* page = | 2104 PartitionPage* page = |
| 2086 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); | 2105 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); |
| 2087 partitionFreeGeneric(genericAllocator.root(), ptr4); | 2106 PartitionFreeGeneric(generic_allocator.root(), ptr4); |
| 2088 partitionFreeGeneric(genericAllocator.root(), ptr3); | 2107 PartitionFreeGeneric(generic_allocator.root(), ptr3); |
| 2089 EXPECT_EQ(0u, page->numUnprovisionedSlots); | 2108 EXPECT_EQ(0u, page->num_unprovisioned_slots); |
| 2090 | 2109 |
| 2091 { | 2110 { |
| 2092 MockPartitionStatsDumper mockStatsDumperGeneric; | 2111 MockPartitionStatsDumper dumper; |
| 2093 partitionDumpStatsGeneric( | 2112 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 2094 genericAllocator.root(), "mock_generic_allocator", | 2113 "mock_generic_allocator", |
| 2095 false /* detailed dump */, &mockStatsDumperGeneric); | 2114 false /* detailed dump */, &dumper); |
| 2096 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); | 2115 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 2097 | 2116 |
| 2098 const PartitionBucketMemoryStats* stats = | 2117 const PartitionBucketMemoryStats* stats = |
| 2099 mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); | 2118 dumper.GetBucketStats(kSystemPageSize); |
| 2100 EXPECT_TRUE(stats); | 2119 EXPECT_TRUE(stats); |
| 2101 EXPECT_TRUE(stats->isValid); | 2120 EXPECT_TRUE(stats->is_valid); |
| 2102 EXPECT_EQ(0u, stats->decommittableBytes); | 2121 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 2103 EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); | 2122 EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); |
| 2104 EXPECT_EQ(2 * kSystemPageSize, stats->activeBytes); | 2123 EXPECT_EQ(2 * kSystemPageSize, stats->active_bytes); |
| 2105 EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes); | 2124 EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes); |
| 2106 } | 2125 } |
| 2107 CheckPageInCore(ptr1 - kPointerOffset, true); | 2126 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 2108 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); | 2127 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); |
| 2109 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); | 2128 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); |
| 2110 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); | 2129 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); |
| 2111 partitionPurgeMemoryGeneric(genericAllocator.root(), | 2130 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 2112 PartitionPurgeDiscardUnusedSystemPages); | 2131 PartitionPurgeDiscardUnusedSystemPages); |
| 2113 EXPECT_EQ(2u, page->numUnprovisionedSlots); | 2132 EXPECT_EQ(2u, page->num_unprovisioned_slots); |
| 2114 CheckPageInCore(ptr1 - kPointerOffset, true); | 2133 CheckPageInCore(ptr1 - kPointerOffset, true); |
| 2115 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); | 2134 CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); |
| 2116 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false); | 2135 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false); |
| 2117 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); | 2136 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); |
| 2118 | 2137 |
| 2119 EXPECT_FALSE(page->freelistHead); | 2138 EXPECT_FALSE(page->freelist_head); |
| 2120 | 2139 |
| 2121 partitionFreeGeneric(genericAllocator.root(), ptr1); | 2140 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 2122 partitionFreeGeneric(genericAllocator.root(), ptr2); | 2141 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 2123 } | 2142 } |
| 2124 | 2143 |
| 2125 TestShutdown(); | 2144 TestShutdown(); |
| 2126 } | 2145 } |
| 2127 | 2146 |
| 2128 } // namespace base | 2147 } // namespace base |
| 2129 | 2148 |
| 2130 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 2149 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| OLD | NEW |