| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/allocator/partition_allocator/partition_alloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
| 6 | 6 |
| 7 #include <stdlib.h> | 7 #include <stdlib.h> |
| 8 #include <string.h> | 8 #include <string.h> |
| 9 | 9 |
| 10 #include <memory> | 10 #include <memory> |
| 11 #include <vector> | 11 #include <vector> |
| 12 | 12 |
| 13 #include "base/bits.h" | 13 #include "base/bits.h" |
| 14 #include "base/sys_info.h" | 14 #include "base/sys_info.h" |
| 15 #include "build/build_config.h" | 15 #include "build/build_config.h" |
| 16 #include "testing/gtest/include/gtest/gtest.h" | 16 #include "testing/gtest/include/gtest/gtest.h" |
| 17 | 17 |
| 18 #if defined(OS_POSIX) | 18 #if defined(OS_POSIX) |
| 19 #include <sys/mman.h> | 19 #include <sys/mman.h> |
| 20 #include <sys/resource.h> | 20 #include <sys/resource.h> |
| 21 #include <sys/time.h> | 21 #include <sys/time.h> |
| 22 | 22 |
| 23 #ifndef MAP_ANONYMOUS | |
| 24 #define MAP_ANONYMOUS MAP_ANON | |
| 25 #endif | |
| 26 #endif // defined(OS_POSIX) | 23 #endif // defined(OS_POSIX) |
| 27 | 24 |
| 25 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 26 |
| 28 namespace { | 27 namespace { |
| 28 |
| 29 template <typename T> | 29 template <typename T> |
| 30 std::unique_ptr<T[]> WrapArrayUnique(T* ptr) { | 30 std::unique_ptr<T[]> WrapArrayUnique(T* ptr) { |
| 31 return std::unique_ptr<T[]>(ptr); | 31 return std::unique_ptr<T[]>(ptr); |
| 32 } | 32 } |
| 33 } // namespace | |
| 34 | |
| 35 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | |
| 36 | |
| 37 namespace base { | |
| 38 | |
| 39 namespace { | |
| 40 | 33 |
| 41 const size_t kTestMaxAllocation = 4096; | 34 const size_t kTestMaxAllocation = 4096; |
| 42 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; | |
| 43 PartitionAllocatorGeneric generic_allocator; | |
| 44 | 35 |
| 45 const size_t kTestAllocSize = 16; | 36 bool IsLargeMemoryDevice() { |
| 46 #if !DCHECK_IS_ON() | 37 return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024; |
| 47 const size_t kPointerOffset = 0; | |
| 48 const size_t kExtraAllocSize = 0; | |
| 49 #else | |
| 50 const size_t kPointerOffset = kCookieSize; | |
| 51 const size_t kExtraAllocSize = kCookieSize * 2; | |
| 52 #endif | |
| 53 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; | |
| 54 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; | |
| 55 | |
| 56 const char* type_name = nullptr; | |
| 57 | |
| 58 void TestSetup() { | |
| 59 // Zero the allocator structs to clear out traces | |
| 60 // from previous test. | |
| 61 memset(&allocator, 0, sizeof(allocator)); | |
| 62 memset(&generic_allocator, 0, sizeof(generic_allocator)); | |
| 63 | |
| 64 allocator.init(); | |
| 65 generic_allocator.init(); | |
| 66 } | 38 } |
| 67 | 39 |
| 68 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) | |
| 69 bool SetAddressSpaceLimit() { | 40 bool SetAddressSpaceLimit() { |
| 70 #if !defined(ARCH_CPU_64_BITS) | 41 #if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX) |
| 71 // 32 bits => address space is limited already. | 42 // 32 bits => address space is limited already. |
| 72 return true; | 43 return true; |
| 73 #elif defined(OS_POSIX) && !defined(OS_MACOSX) | 44 #elif defined(OS_POSIX) && !defined(OS_MACOSX) |
| 74 // Mac will accept RLIMIT_AS changes but it is not enforced. | 45 // macOS will accept, but not enforce, |RLIMIT_AS| changes. See |
| 75 // See https://crbug.com/435269 and rdar://17576114. | 46 // https://crbug.com/435269 and rdar://17576114. |
| 76 // Note: this number must be not less than 6 GB, because with | 47 // |
| 77 // sanitizer_coverage_flags=edge, it reserves > 5 GB of address | 48 // Note: This number must be not less than 6 GB, because with |
| 78 // space, see https://crbug.com/674665. | 49 // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See |
| 50 // https://crbug.com/674665. |
| 79 const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024; | 51 const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024; |
| 80 struct rlimit limit; | 52 struct rlimit limit; |
| 81 if (getrlimit(RLIMIT_AS, &limit) != 0) | 53 if (getrlimit(RLIMIT_AS, &limit) != 0) |
| 82 return false; | 54 return false; |
| 83 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { | 55 if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) { |
| 84 limit.rlim_cur = kAddressSpaceLimit; | 56 limit.rlim_cur = kAddressSpaceLimit; |
| 85 if (setrlimit(RLIMIT_AS, &limit) != 0) | 57 if (setrlimit(RLIMIT_AS, &limit) != 0) |
| 86 return false; | 58 return false; |
| 87 } | 59 } |
| 88 return true; | 60 return true; |
| 89 #else | 61 #else |
| 90 return false; | 62 return false; |
| 91 #endif | 63 #endif |
| 92 } | 64 } |
| 93 | 65 |
| 94 bool ClearAddressSpaceLimit() { | 66 bool ClearAddressSpaceLimit() { |
| 95 #if !defined(ARCH_CPU_64_BITS) | 67 #if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX) |
| 96 return true; | 68 return true; |
| 97 #elif defined(OS_POSIX) | 69 #elif defined(OS_POSIX) |
| 98 struct rlimit limit; | 70 struct rlimit limit; |
| 99 if (getrlimit(RLIMIT_AS, &limit) != 0) | 71 if (getrlimit(RLIMIT_AS, &limit) != 0) |
| 100 return false; | 72 return false; |
| 101 limit.rlim_cur = limit.rlim_max; | 73 limit.rlim_cur = limit.rlim_max; |
| 102 if (setrlimit(RLIMIT_AS, &limit) != 0) | 74 if (setrlimit(RLIMIT_AS, &limit) != 0) |
| 103 return false; | 75 return false; |
| 104 return true; | 76 return true; |
| 105 #else | 77 #else |
| 106 return false; | 78 return false; |
| 107 #endif | 79 #endif |
| 108 } | 80 } |
| 81 |
| 82 } // namespace |
| 83 |
| 84 namespace base { |
| 85 |
| 86 const size_t kTestAllocSize = 16; |
| 87 #if !DCHECK_IS_ON() |
| 88 const size_t kPointerOffset = 0; |
| 89 const size_t kExtraAllocSize = 0; |
| 90 #else |
| 91 const size_t kPointerOffset = kCookieSize; |
| 92 const size_t kExtraAllocSize = kCookieSize * 2; |
| 109 #endif | 93 #endif |
| 94 const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; |
| 95 const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; |
| 110 | 96 |
| 111 PartitionPage* GetFullPage(size_t size) { | 97 const char* type_name = nullptr; |
| 112 size_t real_size = size + kExtraAllocSize; | 98 |
| 113 size_t bucket_index = real_size >> kBucketShift; | 99 class PartitionAllocTest : public testing::Test { |
| 114 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; | 100 protected: |
| 115 size_t num_slots = | 101 PartitionAllocTest() {} |
| 116 (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size; | 102 |
| 117 void* first = 0; | 103 ~PartitionAllocTest() override {} |
| 118 void* last = 0; | 104 |
| 119 size_t i; | 105 void SetUp() override { |
| 120 for (i = 0; i < num_slots; ++i) { | 106 // TODO(crbug.com/722911): These calls to |memset| should perhaps not be |
| 121 void* ptr = PartitionAlloc(allocator.root(), size, type_name); | 107 // necessary. |
| 122 EXPECT_TRUE(ptr); | 108 memset(&allocator, 0, sizeof(allocator)); |
| 123 if (!i) | 109 memset(&generic_allocator, 0, sizeof(generic_allocator)); |
| 124 first = PartitionCookieFreePointerAdjust(ptr); | 110 allocator.init(); |
| 125 else if (i == num_slots - 1) | 111 generic_allocator.init(); |
| 126 last = PartitionCookieFreePointerAdjust(ptr); | |
| 127 } | 112 } |
| 128 EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last)); | 113 |
| 129 if (bucket->num_system_pages_per_slot_span == kNumSystemPagesPerPartitionPage) | 114 PartitionPage* GetFullPage(size_t size) { |
| 130 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, | 115 size_t real_size = size + kExtraAllocSize; |
| 131 reinterpret_cast<size_t>(last) & kPartitionPageBaseMask); | 116 size_t bucket_index = real_size >> kBucketShift; |
| 132 EXPECT_EQ(num_slots, static_cast<size_t>( | 117 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 133 bucket->active_pages_head->num_allocated_slots)); | 118 size_t num_slots = |
| 134 EXPECT_EQ(0, bucket->active_pages_head->freelist_head); | 119 (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size; |
| 135 EXPECT_TRUE(bucket->active_pages_head); | 120 void* first = 0; |
| 136 EXPECT_TRUE(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage); | 121 void* last = 0; |
| 137 return bucket->active_pages_head; | 122 size_t i; |
| 138 } | 123 for (i = 0; i < num_slots; ++i) { |
| 124 void* ptr = PartitionAlloc(allocator.root(), size, type_name); |
| 125 EXPECT_TRUE(ptr); |
| 126 if (!i) |
| 127 first = PartitionCookieFreePointerAdjust(ptr); |
| 128 else if (i == num_slots - 1) |
| 129 last = PartitionCookieFreePointerAdjust(ptr); |
| 130 } |
| 131 EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last)); |
| 132 if (bucket->num_system_pages_per_slot_span == |
| 133 kNumSystemPagesPerPartitionPage) |
| 134 EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, |
| 135 reinterpret_cast<size_t>(last) & kPartitionPageBaseMask); |
| 136 EXPECT_EQ(num_slots, static_cast<size_t>( |
| 137 bucket->active_pages_head->num_allocated_slots)); |
| 138 EXPECT_EQ(0, bucket->active_pages_head->freelist_head); |
| 139 EXPECT_TRUE(bucket->active_pages_head); |
| 140 EXPECT_TRUE(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage); |
| 141 return bucket->active_pages_head; |
| 142 } |
| 143 |
| 144 void CycleFreeCache(size_t size) { |
| 145 size_t real_size = size + kExtraAllocSize; |
| 146 size_t bucket_index = real_size >> kBucketShift; |
| 147 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 148 DCHECK(!bucket->active_pages_head->num_allocated_slots); |
| 149 |
| 150 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 151 void* ptr = PartitionAlloc(allocator.root(), size, type_name); |
| 152 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); |
| 153 PartitionFree(ptr); |
| 154 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); |
| 155 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); |
| 156 } |
| 157 } |
| 158 |
| 159 void CycleGenericFreeCache(size_t size) { |
| 160 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 161 void* ptr = |
| 162 PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 163 PartitionPage* page = |
| 164 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 165 PartitionBucket* bucket = page->bucket; |
| 166 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); |
| 167 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 168 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); |
| 169 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); |
| 170 } |
| 171 } |
| 172 |
| 173 void DoReturnNullTest(size_t allocSize) { |
| 174 // TODO(crbug.com/678782): Where necessary and possible, disable the |
| 175 // platform's OOM-killing behavior. OOM-killing makes this test flaky on |
| 176 // low-memory devices. |
| 177 if (!IsLargeMemoryDevice()) { |
| 178 LOG(WARNING) |
| 179 << "Skipping test on this device because of crbug.com/678782"; |
| 180 return; |
| 181 } |
| 182 |
| 183 EXPECT_TRUE(SetAddressSpaceLimit()); |
| 184 |
| 185 // Work out the number of allocations for 6 GB of memory. |
| 186 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); |
| 187 |
| 188 void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric( |
| 189 generic_allocator.root(), numAllocations * sizeof(void*), type_name)); |
| 190 int i; |
| 191 |
| 192 for (i = 0; i < numAllocations; ++i) { |
| 193 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 194 PartitionAllocReturnNull, allocSize, |
| 195 type_name); |
| 196 if (!i) |
| 197 EXPECT_TRUE(ptrs[0]); |
| 198 if (!ptrs[i]) { |
| 199 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 200 PartitionAllocReturnNull, |
| 201 allocSize, type_name); |
| 202 EXPECT_FALSE(ptrs[i]); |
| 203 break; |
| 204 } |
| 205 } |
| 206 |
| 207 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then |
| 208 // we're not actually testing anything here. |
| 209 EXPECT_LT(i, numAllocations); |
| 210 |
| 211 // Free, reallocate and free again each block we allocated. We do this to |
| 212 // check that freeing memory also works correctly after a failed allocation. |
| 213 for (--i; i >= 0; --i) { |
| 214 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); |
| 215 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), |
| 216 PartitionAllocReturnNull, allocSize, |
| 217 type_name); |
| 218 EXPECT_TRUE(ptrs[i]); |
| 219 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); |
| 220 } |
| 221 |
| 222 PartitionFreeGeneric(generic_allocator.root(), ptrs); |
| 223 |
| 224 EXPECT_TRUE(ClearAddressSpaceLimit()); |
| 225 } |
| 226 |
| 227 SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; |
| 228 PartitionAllocatorGeneric generic_allocator; |
| 229 }; |
| 230 |
| 231 class PartitionAllocDeathTest : public PartitionAllocTest {}; |
| 232 |
| 233 namespace { |
| 139 | 234 |
| 140 void FreeFullPage(PartitionPage* page) { | 235 void FreeFullPage(PartitionPage* page) { |
| 141 size_t size = page->bucket->slot_size; | 236 size_t size = page->bucket->slot_size; |
| 142 size_t num_slots = | 237 size_t num_slots = |
| 143 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size; | 238 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size; |
| 144 EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots))); | 239 EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots))); |
| 145 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); | 240 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 146 size_t i; | 241 size_t i; |
| 147 for (i = 0; i < num_slots; ++i) { | 242 for (i = 0; i < num_slots; ++i) { |
| 148 PartitionFree(ptr + kPointerOffset); | 243 PartitionFree(ptr + kPointerOffset); |
| 149 ptr += size; | 244 ptr += size; |
| 150 } | 245 } |
| 151 } | 246 } |
| 152 | 247 |
| 153 void CycleFreeCache(size_t size) { | |
| 154 size_t real_size = size + kExtraAllocSize; | |
| 155 size_t bucket_index = real_size >> kBucketShift; | |
| 156 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; | |
| 157 DCHECK(!bucket->active_pages_head->num_allocated_slots); | |
| 158 | |
| 159 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | |
| 160 void* ptr = PartitionAlloc(allocator.root(), size, type_name); | |
| 161 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); | |
| 162 PartitionFree(ptr); | |
| 163 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); | |
| 164 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); | |
| 165 } | |
| 166 } | |
| 167 | |
| 168 void CycleGenericFreeCache(size_t size) { | |
| 169 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | |
| 170 void* ptr = | |
| 171 PartitionAllocGeneric(generic_allocator.root(), size, type_name); | |
| 172 PartitionPage* page = | |
| 173 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); | |
| 174 PartitionBucket* bucket = page->bucket; | |
| 175 EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); | |
| 176 PartitionFreeGeneric(generic_allocator.root(), ptr); | |
| 177 EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); | |
| 178 EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); | |
| 179 } | |
| 180 } | |
| 181 | |
| 182 void CheckPageInCore(void* ptr, bool inCore) { | 248 void CheckPageInCore(void* ptr, bool inCore) { |
| 183 #if defined(OS_LINUX) | 249 #if defined(OS_LINUX) |
| 184 unsigned char ret; | 250 unsigned char ret; |
| 185 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); | 251 EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret)); |
| 186 EXPECT_EQ(inCore, ret); | 252 EXPECT_EQ(inCore, ret); |
| 187 #endif | 253 #endif |
| 188 } | 254 } |
| 189 | 255 |
| 190 bool IsLargeMemoryDevice() { | |
| 191 return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024; | |
| 192 } | |
| 193 | |
| 194 class MockPartitionStatsDumper : public PartitionStatsDumper { | 256 class MockPartitionStatsDumper : public PartitionStatsDumper { |
| 195 public: | 257 public: |
| 196 MockPartitionStatsDumper() | 258 MockPartitionStatsDumper() |
| 197 : total_resident_bytes(0), | 259 : total_resident_bytes(0), |
| 198 total_active_bytes(0), | 260 total_active_bytes(0), |
| 199 total_decommittable_bytes(0), | 261 total_decommittable_bytes(0), |
| 200 total_discardable_bytes(0) {} | 262 total_discardable_bytes(0) {} |
| 201 | 263 |
| 202 void PartitionDumpTotals(const char* partition_name, | 264 void PartitionDumpTotals(const char* partition_name, |
| 203 const PartitionMemoryStats* stats) override { | 265 const PartitionMemoryStats* stats) override { |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 238 size_t total_active_bytes; | 300 size_t total_active_bytes; |
| 239 size_t total_decommittable_bytes; | 301 size_t total_decommittable_bytes; |
| 240 size_t total_discardable_bytes; | 302 size_t total_discardable_bytes; |
| 241 | 303 |
| 242 std::vector<PartitionBucketMemoryStats> bucket_stats; | 304 std::vector<PartitionBucketMemoryStats> bucket_stats; |
| 243 }; | 305 }; |
| 244 | 306 |
| 245 } // anonymous namespace | 307 } // anonymous namespace |
| 246 | 308 |
| 247 // Check that the most basic of allocate / free pairs work. | 309 // Check that the most basic of allocate / free pairs work. |
| 248 TEST(PartitionAllocTest, Basic) { | 310 TEST_F(PartitionAllocTest, Basic) { |
| 249 TestSetup(); | |
| 250 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 311 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 251 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; | 312 PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; |
| 252 | 313 |
| 253 EXPECT_FALSE(bucket->empty_pages_head); | 314 EXPECT_FALSE(bucket->empty_pages_head); |
| 254 EXPECT_FALSE(bucket->decommitted_pages_head); | 315 EXPECT_FALSE(bucket->decommitted_pages_head); |
| 255 EXPECT_EQ(seedPage, bucket->active_pages_head); | 316 EXPECT_EQ(seedPage, bucket->active_pages_head); |
| 256 EXPECT_EQ(0, bucket->active_pages_head->next_page); | 317 EXPECT_EQ(0, bucket->active_pages_head->next_page); |
| 257 | 318 |
| 258 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); | 319 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 259 EXPECT_TRUE(ptr); | 320 EXPECT_TRUE(ptr); |
| 260 EXPECT_EQ(kPointerOffset, | 321 EXPECT_EQ(kPointerOffset, |
| 261 reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask); | 322 reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask); |
| 262 // Check that the offset appears to include a guard page. | 323 // Check that the offset appears to include a guard page. |
| 263 EXPECT_EQ(kPartitionPageSize + kPointerOffset, | 324 EXPECT_EQ(kPartitionPageSize + kPointerOffset, |
| 264 reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); | 325 reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); |
| 265 | 326 |
| 266 PartitionFree(ptr); | 327 PartitionFree(ptr); |
| 267 // Expect that the last active page gets noticed as empty but doesn't get | 328 // Expect that the last active page gets noticed as empty but doesn't get |
| 268 // decommitted. | 329 // decommitted. |
| 269 EXPECT_TRUE(bucket->empty_pages_head); | 330 EXPECT_TRUE(bucket->empty_pages_head); |
| 270 EXPECT_FALSE(bucket->decommitted_pages_head); | 331 EXPECT_FALSE(bucket->decommitted_pages_head); |
| 271 } | 332 } |
| 272 | 333 |
| 273 // Test multiple allocations, and freelist handling. | 334 // Test multiple allocations, and freelist handling. |
| 274 TEST(PartitionAllocTest, MultiAlloc) { | 335 TEST_F(PartitionAllocTest, MultiAlloc) { |
| 275 TestSetup(); | |
| 276 | |
| 277 char* ptr1 = reinterpret_cast<char*>( | 336 char* ptr1 = reinterpret_cast<char*>( |
| 278 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); | 337 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 279 char* ptr2 = reinterpret_cast<char*>( | 338 char* ptr2 = reinterpret_cast<char*>( |
| 280 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); | 339 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 281 EXPECT_TRUE(ptr1); | 340 EXPECT_TRUE(ptr1); |
| 282 EXPECT_TRUE(ptr2); | 341 EXPECT_TRUE(ptr2); |
| 283 ptrdiff_t diff = ptr2 - ptr1; | 342 ptrdiff_t diff = ptr2 - ptr1; |
| 284 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); | 343 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); |
| 285 | 344 |
| 286 // Check that we re-use the just-freed slot. | 345 // Check that we re-use the just-freed slot. |
| (...skipping 15 matching lines...) Expand all Loading... |
| 302 EXPECT_TRUE(ptr3); | 361 EXPECT_TRUE(ptr3); |
| 303 diff = ptr3 - ptr1; | 362 diff = ptr3 - ptr1; |
| 304 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); | 363 EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); |
| 305 | 364 |
| 306 PartitionFree(ptr1); | 365 PartitionFree(ptr1); |
| 307 PartitionFree(ptr2); | 366 PartitionFree(ptr2); |
| 308 PartitionFree(ptr3); | 367 PartitionFree(ptr3); |
| 309 } | 368 } |
| 310 | 369 |
| 311 // Test a bucket with multiple pages. | 370 // Test a bucket with multiple pages. |
| 312 TEST(PartitionAllocTest, MultiPages) { | 371 TEST_F(PartitionAllocTest, MultiPages) { |
| 313 TestSetup(); | |
| 314 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 372 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 315 | 373 |
| 316 PartitionPage* page = GetFullPage(kTestAllocSize); | 374 PartitionPage* page = GetFullPage(kTestAllocSize); |
| 317 FreeFullPage(page); | 375 FreeFullPage(page); |
| 318 EXPECT_TRUE(bucket->empty_pages_head); | 376 EXPECT_TRUE(bucket->empty_pages_head); |
| 319 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); | 377 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 320 EXPECT_EQ(0, page->next_page); | 378 EXPECT_EQ(0, page->next_page); |
| 321 EXPECT_EQ(0, page->num_allocated_slots); | 379 EXPECT_EQ(0, page->num_allocated_slots); |
| 322 | 380 |
| 323 page = GetFullPage(kTestAllocSize); | 381 page = GetFullPage(kTestAllocSize); |
| (...skipping 20 matching lines...) Expand all Loading... |
| 344 | 402 |
| 345 FreeFullPage(page); | 403 FreeFullPage(page); |
| 346 FreeFullPage(page2); | 404 FreeFullPage(page2); |
| 347 EXPECT_EQ(0, page->num_allocated_slots); | 405 EXPECT_EQ(0, page->num_allocated_slots); |
| 348 EXPECT_EQ(0, page2->num_allocated_slots); | 406 EXPECT_EQ(0, page2->num_allocated_slots); |
| 349 EXPECT_EQ(0, page2->num_unprovisioned_slots); | 407 EXPECT_EQ(0, page2->num_unprovisioned_slots); |
| 350 EXPECT_NE(-1, page2->empty_cache_index); | 408 EXPECT_NE(-1, page2->empty_cache_index); |
| 351 } | 409 } |
| 352 | 410 |
| 353 // Test some finer aspects of internal page transitions. | 411 // Test some finer aspects of internal page transitions. |
| 354 TEST(PartitionAllocTest, PageTransitions) { | 412 TEST_F(PartitionAllocTest, PageTransitions) { |
| 355 TestSetup(); | |
| 356 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 413 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 357 | 414 |
| 358 PartitionPage* page1 = GetFullPage(kTestAllocSize); | 415 PartitionPage* page1 = GetFullPage(kTestAllocSize); |
| 359 EXPECT_EQ(page1, bucket->active_pages_head); | 416 EXPECT_EQ(page1, bucket->active_pages_head); |
| 360 EXPECT_EQ(0, page1->next_page); | 417 EXPECT_EQ(0, page1->next_page); |
| 361 PartitionPage* page2 = GetFullPage(kTestAllocSize); | 418 PartitionPage* page2 = GetFullPage(kTestAllocSize); |
| 362 EXPECT_EQ(page2, bucket->active_pages_head); | 419 EXPECT_EQ(page2, bucket->active_pages_head); |
| 363 EXPECT_EQ(0, page2->next_page); | 420 EXPECT_EQ(0, page2->next_page); |
| 364 | 421 |
| 365 // Bounce page1 back into the non-full list then fill it up again. | 422 // Bounce page1 back into the non-full list then fill it up again. |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 405 FreeFullPage(page1); | 462 FreeFullPage(page1); |
| 406 | 463 |
| 407 // Allocating whilst in this state exposed a bug, so keep the test. | 464 // Allocating whilst in this state exposed a bug, so keep the test. |
| 408 ptr = reinterpret_cast<char*>( | 465 ptr = reinterpret_cast<char*>( |
| 409 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); | 466 PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); |
| 410 PartitionFree(ptr); | 467 PartitionFree(ptr); |
| 411 } | 468 } |
| 412 | 469 |
| 413 // Test some corner cases relating to page transitions in the internal | 470 // Test some corner cases relating to page transitions in the internal |
| 414 // free page list metadata bucket. | 471 // free page list metadata bucket. |
| 415 TEST(PartitionAllocTest, FreePageListPageTransitions) { | 472 TEST_F(PartitionAllocTest, FreePageListPageTransitions) { |
| 416 TestSetup(); | |
| 417 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 473 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 418 | 474 |
| 419 size_t numToFillFreeListPage = | 475 size_t numToFillFreeListPage = |
| 420 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); | 476 kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); |
| 421 // The +1 is because we need to account for the fact that the current page | 477 // The +1 is because we need to account for the fact that the current page |
| 422 // never gets thrown on the freelist. | 478 // never gets thrown on the freelist. |
| 423 ++numToFillFreeListPage; | 479 ++numToFillFreeListPage; |
| 424 std::unique_ptr<PartitionPage* []> pages = | 480 std::unique_ptr<PartitionPage* []> pages = |
| 425 WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]); | 481 WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]); |
| 426 | 482 |
| (...skipping 21 matching lines...) Expand all Loading... |
| 448 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); | 504 EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); |
| 449 | 505 |
| 450 for (i = 0; i < numToFillFreeListPage; ++i) | 506 for (i = 0; i < numToFillFreeListPage; ++i) |
| 451 FreeFullPage(pages[i]); | 507 FreeFullPage(pages[i]); |
| 452 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); | 508 EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); |
| 453 EXPECT_TRUE(bucket->empty_pages_head); | 509 EXPECT_TRUE(bucket->empty_pages_head); |
| 454 } | 510 } |
| 455 | 511 |
| 456 // Test a large series of allocations that cross more than one underlying | 512 // Test a large series of allocations that cross more than one underlying |
| 457 // 64KB super page allocation. | 513 // 64KB super page allocation. |
| 458 TEST(PartitionAllocTest, MultiPageAllocs) { | 514 TEST_F(PartitionAllocTest, MultiPageAllocs) { |
| 459 TestSetup(); | |
| 460 // This is guaranteed to cross a super page boundary because the first | 515 // This is guaranteed to cross a super page boundary because the first |
| 461 // partition page "slot" will be taken up by a guard page. | 516 // partition page "slot" will be taken up by a guard page. |
| 462 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage; | 517 size_t numPagesNeeded = kNumPartitionPagesPerSuperPage; |
| 463 // The super page should begin and end in a guard so we one less page in | 518 // The super page should begin and end in a guard so we one less page in |
| 464 // order to allocate a single page in the new super page. | 519 // order to allocate a single page in the new super page. |
| 465 --numPagesNeeded; | 520 --numPagesNeeded; |
| 466 | 521 |
| 467 EXPECT_GT(numPagesNeeded, 1u); | 522 EXPECT_GT(numPagesNeeded, 1u); |
| 468 std::unique_ptr<PartitionPage* []> pages; | 523 std::unique_ptr<PartitionPage* []> pages; |
| 469 pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]); | 524 pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]); |
| (...skipping 14 matching lines...) Expand all Loading... |
| 484 // Check that we allocated a guard page for the second page. | 539 // Check that we allocated a guard page for the second page. |
| 485 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset); | 540 EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset); |
| 486 } | 541 } |
| 487 } | 542 } |
| 488 for (i = 0; i < numPagesNeeded; ++i) | 543 for (i = 0; i < numPagesNeeded; ++i) |
| 489 FreeFullPage(pages[i]); | 544 FreeFullPage(pages[i]); |
| 490 } | 545 } |
| 491 | 546 |
| 492 // Test the generic allocation functions that can handle arbitrary sizes and | 547 // Test the generic allocation functions that can handle arbitrary sizes and |
| 493 // reallocing etc. | 548 // reallocing etc. |
| 494 TEST(PartitionAllocTest, GenericAlloc) { | 549 TEST_F(PartitionAllocTest, GenericAlloc) { |
| 495 TestSetup(); | |
| 496 | |
| 497 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); | 550 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); |
| 498 EXPECT_TRUE(ptr); | 551 EXPECT_TRUE(ptr); |
| 499 PartitionFreeGeneric(generic_allocator.root(), ptr); | 552 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 500 ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1, | 553 ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1, |
| 501 type_name); | 554 type_name); |
| 502 EXPECT_TRUE(ptr); | 555 EXPECT_TRUE(ptr); |
| 503 PartitionFreeGeneric(generic_allocator.root(), ptr); | 556 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 504 | 557 |
| 505 ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); | 558 ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); |
| 506 EXPECT_TRUE(ptr); | 559 EXPECT_TRUE(ptr); |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 578 EXPECT_NE(newPtr, ptr); | 631 EXPECT_NE(newPtr, ptr); |
| 579 EXPECT_EQ(newPtr, origPtr); | 632 EXPECT_EQ(newPtr, origPtr); |
| 580 newCharPtr = static_cast<char*>(newPtr); | 633 newCharPtr = static_cast<char*>(newPtr); |
| 581 EXPECT_EQ(*newCharPtr, 'F'); | 634 EXPECT_EQ(*newCharPtr, 'F'); |
| 582 | 635 |
| 583 PartitionFreeGeneric(generic_allocator.root(), newPtr); | 636 PartitionFreeGeneric(generic_allocator.root(), newPtr); |
| 584 } | 637 } |
| 585 | 638 |
| 586 // Test the generic allocation functions can handle some specific sizes of | 639 // Test the generic allocation functions can handle some specific sizes of |
| 587 // interest. | 640 // interest. |
| 588 TEST(PartitionAllocTest, GenericAllocSizes) { | 641 TEST_F(PartitionAllocTest, GenericAllocSizes) { |
| 589 TestSetup(); | |
| 590 | |
| 591 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name); | 642 void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name); |
| 592 EXPECT_TRUE(ptr); | 643 EXPECT_TRUE(ptr); |
| 593 PartitionFreeGeneric(generic_allocator.root(), ptr); | 644 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 594 | 645 |
| 595 // kPartitionPageSize is interesting because it results in just one | 646 // kPartitionPageSize is interesting because it results in just one |
| 596 // allocation per page, which tripped up some corner cases. | 647 // allocation per page, which tripped up some corner cases. |
| 597 size_t size = kPartitionPageSize - kExtraAllocSize; | 648 size_t size = kPartitionPageSize - kExtraAllocSize; |
| 598 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 649 ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 599 EXPECT_TRUE(ptr); | 650 EXPECT_TRUE(ptr); |
| 600 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 651 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| (...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 674 // Can we free null? | 725 // Can we free null? |
| 675 PartitionFreeGeneric(generic_allocator.root(), 0); | 726 PartitionFreeGeneric(generic_allocator.root(), 0); |
| 676 | 727 |
| 677 // Do we correctly get a null for a failed allocation? | 728 // Do we correctly get a null for a failed allocation? |
| 678 EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(), | 729 EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(), |
| 679 PartitionAllocReturnNull, | 730 PartitionAllocReturnNull, |
| 680 3u * 1024 * 1024 * 1024, type_name)); | 731 3u * 1024 * 1024 * 1024, type_name)); |
| 681 } | 732 } |
| 682 | 733 |
| 683 // Test that we can fetch the real allocated size after an allocation. | 734 // Test that we can fetch the real allocated size after an allocation. |
| 684 TEST(PartitionAllocTest, GenericAllocGetSize) { | 735 TEST_F(PartitionAllocTest, GenericAllocGetSize) { |
| 685 TestSetup(); | |
| 686 | |
| 687 void* ptr; | 736 void* ptr; |
| 688 size_t requestedSize, actualSize, predictedSize; | 737 size_t requestedSize, actualSize, predictedSize; |
| 689 | 738 |
| 690 EXPECT_TRUE(PartitionAllocSupportsGetSize()); | 739 EXPECT_TRUE(PartitionAllocSupportsGetSize()); |
| 691 | 740 |
| 692 // Allocate something small. | 741 // Allocate something small. |
| 693 requestedSize = 511 - kExtraAllocSize; | 742 requestedSize = 511 - kExtraAllocSize; |
| 694 predictedSize = | 743 predictedSize = |
| 695 PartitionAllocActualSize(generic_allocator.root(), requestedSize); | 744 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 696 ptr = | 745 ptr = |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 745 } | 794 } |
| 746 | 795 |
| 747 // Too large allocation. | 796 // Too large allocation. |
| 748 requestedSize = INT_MAX; | 797 requestedSize = INT_MAX; |
| 749 predictedSize = | 798 predictedSize = |
| 750 PartitionAllocActualSize(generic_allocator.root(), requestedSize); | 799 PartitionAllocActualSize(generic_allocator.root(), requestedSize); |
| 751 EXPECT_EQ(requestedSize, predictedSize); | 800 EXPECT_EQ(requestedSize, predictedSize); |
| 752 } | 801 } |
| 753 | 802 |
| 754 // Test the realloc() contract. | 803 // Test the realloc() contract. |
| 755 TEST(PartitionAllocTest, Realloc) { | 804 TEST_F(PartitionAllocTest, Realloc) { |
| 756 TestSetup(); | |
| 757 | |
| 758 // realloc(0, size) should be equivalent to malloc(). | 805 // realloc(0, size) should be equivalent to malloc(). |
| 759 void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0, | 806 void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0, |
| 760 kTestAllocSize, type_name); | 807 kTestAllocSize, type_name); |
| 761 memset(ptr, 'A', kTestAllocSize); | 808 memset(ptr, 'A', kTestAllocSize); |
| 762 PartitionPage* page = | 809 PartitionPage* page = |
| 763 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); | 810 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 764 // realloc(ptr, 0) should be equivalent to free(). | 811 // realloc(ptr, 0) should be equivalent to free(). |
| 765 void* ptr2 = | 812 void* ptr2 = |
| 766 PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name); | 813 PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name); |
| 767 EXPECT_EQ(0, ptr2); | 814 EXPECT_EQ(0, ptr2); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 817 // Test that a direct mapped allocation is performed not in-place when the | 864 // Test that a direct mapped allocation is performed not in-place when the |
| 818 // new size is small enough. | 865 // new size is small enough. |
| 819 ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize, | 866 ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize, |
| 820 type_name); | 867 type_name); |
| 821 EXPECT_NE(ptr, ptr2); | 868 EXPECT_NE(ptr, ptr2); |
| 822 | 869 |
| 823 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 870 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 824 } | 871 } |
| 825 | 872 |
| 826 // Tests the handing out of freelists for partial pages. | 873 // Tests the handing out of freelists for partial pages. |
| 827 TEST(PartitionAllocTest, PartialPageFreelists) { | 874 TEST_F(PartitionAllocTest, PartialPageFreelists) { |
| 828 TestSetup(); | |
| 829 | |
| 830 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; | 875 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; |
| 831 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, | 876 EXPECT_EQ(kSystemPageSize - kAllocationGranularity, |
| 832 big_size + kExtraAllocSize); | 877 big_size + kExtraAllocSize); |
| 833 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; | 878 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; |
| 834 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; | 879 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 835 EXPECT_EQ(0, bucket->empty_pages_head); | 880 EXPECT_EQ(0, bucket->empty_pages_head); |
| 836 | 881 |
| 837 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); | 882 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 838 EXPECT_TRUE(ptr); | 883 EXPECT_TRUE(ptr); |
| 839 | 884 |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 973 EXPECT_EQ(1, page->num_allocated_slots); | 1018 EXPECT_EQ(1, page->num_allocated_slots); |
| 974 EXPECT_FALSE(page->freelist_head); | 1019 EXPECT_FALSE(page->freelist_head); |
| 975 totalSlots = | 1020 totalSlots = |
| 976 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / | 1021 (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / |
| 977 (pageSize + kExtraAllocSize); | 1022 (pageSize + kExtraAllocSize); |
| 978 EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots); | 1023 EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots); |
| 979 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1024 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 980 } | 1025 } |
| 981 | 1026 |
| 982 // Test some of the fragmentation-resistant properties of the allocator. | 1027 // Test some of the fragmentation-resistant properties of the allocator. |
| 983 TEST(PartitionAllocTest, PageRefilling) { | 1028 TEST_F(PartitionAllocTest, PageRefilling) { |
| 984 TestSetup(); | |
| 985 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; | 1029 PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; |
| 986 | 1030 |
| 987 // Grab two full pages and a non-full page. | 1031 // Grab two full pages and a non-full page. |
| 988 PartitionPage* page1 = GetFullPage(kTestAllocSize); | 1032 PartitionPage* page1 = GetFullPage(kTestAllocSize); |
| 989 PartitionPage* page2 = GetFullPage(kTestAllocSize); | 1033 PartitionPage* page2 = GetFullPage(kTestAllocSize); |
| 990 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); | 1034 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 991 EXPECT_TRUE(ptr); | 1035 EXPECT_TRUE(ptr); |
| 992 EXPECT_NE(page1, bucket->active_pages_head); | 1036 EXPECT_NE(page1, bucket->active_pages_head); |
| 993 EXPECT_NE(page2, bucket->active_pages_head); | 1037 EXPECT_NE(page2, bucket->active_pages_head); |
| 994 PartitionPage* page = | 1038 PartitionPage* page = |
| (...skipping 13 matching lines...) Expand all Loading... |
| 1008 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); | 1052 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1009 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); | 1053 (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1010 EXPECT_EQ(1, page->num_allocated_slots); | 1054 EXPECT_EQ(1, page->num_allocated_slots); |
| 1011 | 1055 |
| 1012 FreeFullPage(page2); | 1056 FreeFullPage(page2); |
| 1013 FreeFullPage(page1); | 1057 FreeFullPage(page1); |
| 1014 PartitionFree(ptr); | 1058 PartitionFree(ptr); |
| 1015 } | 1059 } |
| 1016 | 1060 |
| 1017 // Basic tests to ensure that allocations work for partial page buckets. | 1061 // Basic tests to ensure that allocations work for partial page buckets. |
| 1018 TEST(PartitionAllocTest, PartialPages) { | 1062 TEST_F(PartitionAllocTest, PartialPages) { |
| 1019 TestSetup(); | |
| 1020 | |
| 1021 // Find a size that is backed by a partial partition page. | 1063 // Find a size that is backed by a partial partition page. |
| 1022 size_t size = sizeof(void*); | 1064 size_t size = sizeof(void*); |
| 1023 PartitionBucket* bucket = 0; | 1065 PartitionBucket* bucket = 0; |
| 1024 while (size < kTestMaxAllocation) { | 1066 while (size < kTestMaxAllocation) { |
| 1025 bucket = &allocator.root()->buckets()[size >> kBucketShift]; | 1067 bucket = &allocator.root()->buckets()[size >> kBucketShift]; |
| 1026 if (bucket->num_system_pages_per_slot_span % | 1068 if (bucket->num_system_pages_per_slot_span % |
| 1027 kNumSystemPagesPerPartitionPage) | 1069 kNumSystemPagesPerPartitionPage) |
| 1028 break; | 1070 break; |
| 1029 size += sizeof(void*); | 1071 size += sizeof(void*); |
| 1030 } | 1072 } |
| 1031 EXPECT_LT(size, kTestMaxAllocation); | 1073 EXPECT_LT(size, kTestMaxAllocation); |
| 1032 | 1074 |
| 1033 PartitionPage* page1 = GetFullPage(size); | 1075 PartitionPage* page1 = GetFullPage(size); |
| 1034 PartitionPage* page2 = GetFullPage(size); | 1076 PartitionPage* page2 = GetFullPage(size); |
| 1035 FreeFullPage(page2); | 1077 FreeFullPage(page2); |
| 1036 FreeFullPage(page1); | 1078 FreeFullPage(page1); |
| 1037 } | 1079 } |
| 1038 | 1080 |
| 1039 // Test correct handling if our mapping collides with another. | 1081 // Test correct handling if our mapping collides with another. |
| 1040 TEST(PartitionAllocTest, MappingCollision) { | 1082 TEST_F(PartitionAllocTest, MappingCollision) { |
| 1041 TestSetup(); | |
| 1042 // The -2 is because the first and last partition pages in a super page are | 1083 // The -2 is because the first and last partition pages in a super page are |
| 1043 // guard pages. | 1084 // guard pages. |
| 1044 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2; | 1085 size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2; |
| 1045 std::unique_ptr<PartitionPage* []> firstSuperPagePages = | 1086 std::unique_ptr<PartitionPage* []> firstSuperPagePages = |
| 1046 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); | 1087 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); |
| 1047 std::unique_ptr<PartitionPage* []> secondSuperPagePages = | 1088 std::unique_ptr<PartitionPage* []> secondSuperPagePages = |
| 1048 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); | 1089 WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]); |
| 1049 | 1090 |
| 1050 size_t i; | 1091 size_t i; |
| 1051 for (i = 0; i < numPartitionPagesNeeded; ++i) | 1092 for (i = 0; i < numPartitionPagesNeeded; ++i) |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1112 kSuperPageBaseMask); | 1153 kSuperPageBaseMask); |
| 1113 | 1154 |
| 1114 FreeFullPage(pageInThirdSuperPage); | 1155 FreeFullPage(pageInThirdSuperPage); |
| 1115 for (i = 0; i < numPartitionPagesNeeded; ++i) { | 1156 for (i = 0; i < numPartitionPagesNeeded; ++i) { |
| 1116 FreeFullPage(firstSuperPagePages[i]); | 1157 FreeFullPage(firstSuperPagePages[i]); |
| 1117 FreeFullPage(secondSuperPagePages[i]); | 1158 FreeFullPage(secondSuperPagePages[i]); |
| 1118 } | 1159 } |
| 1119 } | 1160 } |
| 1120 | 1161 |
| 1121 // Tests that pages in the free page cache do get freed as appropriate. | 1162 // Tests that pages in the free page cache do get freed as appropriate. |
| 1122 TEST(PartitionAllocTest, FreeCache) { | 1163 TEST_F(PartitionAllocTest, FreeCache) { |
| 1123 TestSetup(); | |
| 1124 | |
| 1125 EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages); | 1164 EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages); |
| 1126 | 1165 |
| 1127 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; | 1166 size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; |
| 1128 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; | 1167 size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; |
| 1129 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; | 1168 PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; |
| 1130 | 1169 |
| 1131 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); | 1170 void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 1132 EXPECT_TRUE(ptr); | 1171 EXPECT_TRUE(ptr); |
| 1133 PartitionPage* page = | 1172 PartitionPage* page = |
| 1134 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); | 1173 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| (...skipping 30 matching lines...) Expand all Loading... |
| 1165 ptr = PartitionAlloc(allocator.root(), big_size, type_name); | 1204 ptr = PartitionAlloc(allocator.root(), big_size, type_name); |
| 1166 EXPECT_TRUE(page->freelist_head); | 1205 EXPECT_TRUE(page->freelist_head); |
| 1167 PartitionFree(ptr); | 1206 PartitionFree(ptr); |
| 1168 EXPECT_TRUE(page->freelist_head); | 1207 EXPECT_TRUE(page->freelist_head); |
| 1169 } | 1208 } |
| 1170 EXPECT_EQ(kPartitionPageSize, | 1209 EXPECT_EQ(kPartitionPageSize, |
| 1171 allocator.root()->total_size_of_committed_pages); | 1210 allocator.root()->total_size_of_committed_pages); |
| 1172 } | 1211 } |
| 1173 | 1212 |
| 1174 // Tests for a bug we had with losing references to free pages. | 1213 // Tests for a bug we had with losing references to free pages. |
| 1175 TEST(PartitionAllocTest, LostFreePagesBug) { | 1214 TEST_F(PartitionAllocTest, LostFreePagesBug) { |
| 1176 TestSetup(); | |
| 1177 | |
| 1178 size_t size = kPartitionPageSize - kExtraAllocSize; | 1215 size_t size = kPartitionPageSize - kExtraAllocSize; |
| 1179 | 1216 |
| 1180 void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1217 void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1181 EXPECT_TRUE(ptr); | 1218 EXPECT_TRUE(ptr); |
| 1182 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1219 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1183 EXPECT_TRUE(ptr2); | 1220 EXPECT_TRUE(ptr2); |
| 1184 | 1221 |
| 1185 PartitionPage* page = | 1222 PartitionPage* page = |
| 1186 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); | 1223 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 1187 PartitionPage* page2 = | 1224 PartitionPage* page2 = |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1229 EXPECT_TRUE(ptr); | 1266 EXPECT_TRUE(ptr); |
| 1230 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1267 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1231 | 1268 |
| 1232 EXPECT_TRUE(bucket->active_pages_head); | 1269 EXPECT_TRUE(bucket->active_pages_head); |
| 1233 EXPECT_TRUE(bucket->empty_pages_head); | 1270 EXPECT_TRUE(bucket->empty_pages_head); |
| 1234 EXPECT_TRUE(bucket->decommitted_pages_head); | 1271 EXPECT_TRUE(bucket->decommitted_pages_head); |
| 1235 } | 1272 } |
| 1236 | 1273 |
| 1237 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) | 1274 #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
| 1238 | 1275 |
| 1239 static void DoReturnNullTest(size_t allocSize) { | |
| 1240 // TODO(crbug.com/678782): Where necessary and possible, disable the | |
| 1241 // platform's OOM-killing behavior. OOM-killing makes this test flaky on | |
| 1242 // low-memory devices. | |
| 1243 if (!IsLargeMemoryDevice()) { | |
| 1244 LOG(WARNING) << "Skipping test on this device because of crbug.com/678782"; | |
| 1245 return; | |
| 1246 } | |
| 1247 | |
| 1248 TestSetup(); | |
| 1249 | |
| 1250 EXPECT_TRUE(SetAddressSpaceLimit()); | |
| 1251 | |
| 1252 // Work out the number of allocations for 6 GB of memory. | |
| 1253 const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); | |
| 1254 | |
| 1255 void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric( | |
| 1256 generic_allocator.root(), numAllocations * sizeof(void*), type_name)); | |
| 1257 int i; | |
| 1258 | |
| 1259 for (i = 0; i < numAllocations; ++i) { | |
| 1260 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), | |
| 1261 PartitionAllocReturnNull, allocSize, | |
| 1262 type_name); | |
| 1263 if (!i) | |
| 1264 EXPECT_TRUE(ptrs[0]); | |
| 1265 if (!ptrs[i]) { | |
| 1266 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), | |
| 1267 PartitionAllocReturnNull, allocSize, | |
| 1268 type_name); | |
| 1269 EXPECT_FALSE(ptrs[i]); | |
| 1270 break; | |
| 1271 } | |
| 1272 } | |
| 1273 | |
| 1274 // We shouldn't succeed in allocating all 6 GB of memory. If we do, then | |
| 1275 // we're not actually testing anything here. | |
| 1276 EXPECT_LT(i, numAllocations); | |
| 1277 | |
| 1278 // Free, reallocate and free again each block we allocated. We do this to | |
| 1279 // check that freeing memory also works correctly after a failed allocation. | |
| 1280 for (--i; i >= 0; --i) { | |
| 1281 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); | |
| 1282 ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), | |
| 1283 PartitionAllocReturnNull, allocSize, | |
| 1284 type_name); | |
| 1285 EXPECT_TRUE(ptrs[i]); | |
| 1286 PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); | |
| 1287 } | |
| 1288 | |
| 1289 PartitionFreeGeneric(generic_allocator.root(), ptrs); | |
| 1290 | |
| 1291 EXPECT_TRUE(ClearAddressSpaceLimit()); | |
| 1292 } | |
| 1293 | 1276 |
| 1294 // Unit tests that check if an allocation fails in "return null" mode, | 1277 // Unit tests that check if an allocation fails in "return null" mode, |
| 1295 // repeating it doesn't crash, and still returns null. The tests need to | 1278 // repeating it doesn't crash, and still returns null. The tests need to |
| 1296 // stress memory subsystem limits to do so, hence they try to allocate | 1279 // stress memory subsystem limits to do so, hence they try to allocate |
| 1297 // 6 GB of memory, each with a different per-allocation block size. | 1280 // 6 GB of memory, each with a different per-allocation block size. |
| 1298 // | 1281 // |
| 1299 // On 64-bit POSIX systems, the address space is limited to 6 GB using | 1282 // On 64-bit POSIX systems, the address space is limited to 6 GB using |
| 1300 // setrlimit() first. | 1283 // setrlimit() first. |
| 1301 | 1284 |
| 1302 // Test "return null" for larger, direct-mapped allocations first. As a | 1285 // Test "return null" for larger, direct-mapped allocations first. As a |
| 1303 // direct-mapped allocation's pages are unmapped and freed on release, this | 1286 // direct-mapped allocation's pages are unmapped and freed on release, this |
| 1304 // test is performed first for these "return null" tests in order to leave | 1287 // test is performed first for these "return null" tests in order to leave |
| 1305 // sufficient unreserved virtual memory around for the later one(s). | 1288 // sufficient unreserved virtual memory around for the later one(s). |
| 1306 | 1289 |
| 1307 // Disable this test on Android because, due to its allocation-heavy behavior, | 1290 // Disable this test on Android because, due to its allocation-heavy behavior, |
| 1308 // it tends to get OOM-killed rather than pass. | 1291 // it tends to get OOM-killed rather than pass. |
| 1309 #if defined(OS_MACOSX) || defined(OS_ANDROID) | 1292 #if defined(OS_MACOSX) || defined(OS_ANDROID) |
| 1310 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect | 1293 #define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect |
| 1311 #else | 1294 #else |
| 1312 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect | 1295 #define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect |
| 1313 #endif | 1296 #endif |
| 1314 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) { | 1297 TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) { |
| 1315 // A direct-mapped allocation size. | 1298 // A direct-mapped allocation size. |
| 1316 DoReturnNullTest(32 * 1024 * 1024); | 1299 DoReturnNullTest(32 * 1024 * 1024); |
| 1317 } | 1300 } |
| 1318 | 1301 |
| 1319 // Test "return null" with a 512 kB block size. | 1302 // Test "return null" with a 512 kB block size. |
| 1320 | 1303 |
| 1321 // Disable this test on Android because, due to its allocation-heavy behavior, | 1304 // Disable this test on Android because, due to its allocation-heavy behavior, |
| 1322 // it tends to get OOM-killed rather than pass. | 1305 // it tends to get OOM-killed rather than pass. |
| 1323 #if defined(OS_MACOSX) || defined(OS_ANDROID) | 1306 #if defined(OS_MACOSX) || defined(OS_ANDROID) |
| 1324 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull | 1307 #define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull |
| 1325 #else | 1308 #else |
| 1326 #define MAYBE_RepeatedReturnNull RepeatedReturnNull | 1309 #define MAYBE_RepeatedReturnNull RepeatedReturnNull |
| 1327 #endif | 1310 #endif |
| 1328 TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) { | 1311 TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNull) { |
| 1329 // A single-slot but non-direct-mapped allocation size. | 1312 // A single-slot but non-direct-mapped allocation size. |
| 1330 DoReturnNullTest(512 * 1024); | 1313 DoReturnNullTest(512 * 1024); |
| 1331 } | 1314 } |
| 1332 | 1315 |
| 1333 #endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) | 1316 #endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) |
| 1334 | 1317 |
| 1335 // Death tests misbehave on Android, http://crbug.com/643760. | 1318 // Death tests misbehave on Android, http://crbug.com/643760. |
| 1336 #if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) | 1319 #if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) |
| 1337 | 1320 |
| 1338 // Make sure that malloc(-1) dies. | 1321 // Make sure that malloc(-1) dies. |
| 1339 // In the past, we had an integer overflow that would alias malloc(-1) to | 1322 // In the past, we had an integer overflow that would alias malloc(-1) to |
| 1340 // malloc(0), which is not good. | 1323 // malloc(0), which is not good. |
| 1341 TEST(PartitionAllocDeathTest, LargeAllocs) { | 1324 TEST_F(PartitionAllocDeathTest, LargeAllocs) { |
| 1342 TestSetup(); | |
| 1343 // Largest alloc. | 1325 // Largest alloc. |
| 1344 EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(), | 1326 EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(), |
| 1345 static_cast<size_t>(-1), type_name), | 1327 static_cast<size_t>(-1), type_name), |
| 1346 ""); | 1328 ""); |
| 1347 // And the smallest allocation we expect to die. | 1329 // And the smallest allocation we expect to die. |
| 1348 EXPECT_DEATH( | 1330 EXPECT_DEATH( |
| 1349 PartitionAllocGeneric(generic_allocator.root(), | 1331 PartitionAllocGeneric(generic_allocator.root(), |
| 1350 static_cast<size_t>(INT_MAX) + 1, type_name), | 1332 static_cast<size_t>(INT_MAX) + 1, type_name), |
| 1351 ""); | 1333 ""); |
| 1352 } | 1334 } |
| 1353 | 1335 |
| 1354 // Check that our immediate double-free detection works. | 1336 // Check that our immediate double-free detection works. |
| 1355 TEST(PartitionAllocDeathTest, ImmediateDoubleFree) { | 1337 TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) { |
| 1356 TestSetup(); | |
| 1357 | |
| 1358 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, | 1338 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1359 type_name); | 1339 type_name); |
| 1360 EXPECT_TRUE(ptr); | 1340 EXPECT_TRUE(ptr); |
| 1361 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1341 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1362 | 1342 |
| 1363 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); | 1343 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); |
| 1364 } | 1344 } |
| 1365 | 1345 |
| 1366 // Check that our refcount-based double-free detection works. | 1346 // Check that our refcount-based double-free detection works. |
| 1367 TEST(PartitionAllocDeathTest, RefcountDoubleFree) { | 1347 TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) { |
| 1368 TestSetup(); | |
| 1369 | |
| 1370 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, | 1348 void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1371 type_name); | 1349 type_name); |
| 1372 EXPECT_TRUE(ptr); | 1350 EXPECT_TRUE(ptr); |
| 1373 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, | 1351 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, |
| 1374 type_name); | 1352 type_name); |
| 1375 EXPECT_TRUE(ptr2); | 1353 EXPECT_TRUE(ptr2); |
| 1376 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1354 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1377 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 1355 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1378 // This is not an immediate double-free so our immediate detection won't | 1356 // This is not an immediate double-free so our immediate detection won't |
| 1379 // fire. However, it does take the "refcount" of the partition page to -1, | 1357 // fire. However, it does take the "refcount" of the partition page to -1, |
| 1380 // which is illegal and should be trapped. | 1358 // which is illegal and should be trapped. |
| 1381 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); | 1359 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); |
| 1382 } | 1360 } |
| 1383 | 1361 |
| 1384 // Check that guard pages are present where expected. | 1362 // Check that guard pages are present where expected. |
| 1385 TEST(PartitionAllocDeathTest, GuardPages) { | 1363 TEST_F(PartitionAllocDeathTest, GuardPages) { |
| 1386 TestSetup(); | |
| 1387 | |
| 1388 // PartitionAlloc adds kPartitionPageSize to the requested size | 1364 // PartitionAlloc adds kPartitionPageSize to the requested size |
| 1389 // (for metadata), and then rounds that size to kPageAllocationGranularity. | 1365 // (for metadata), and then rounds that size to kPageAllocationGranularity. |
| 1390 // To be able to reliably write one past a direct allocation, choose a size | 1366 // To be able to reliably write one past a direct allocation, choose a size |
| 1391 // that's | 1367 // that's |
| 1392 // a) larger than kGenericMaxBucketed (to make the allocation direct) | 1368 // a) larger than kGenericMaxBucketed (to make the allocation direct) |
| 1393 // b) aligned at kPageAllocationGranularity boundaries after | 1369 // b) aligned at kPageAllocationGranularity boundaries after |
| 1394 // kPartitionPageSize has been added to it. | 1370 // kPartitionPageSize has been added to it. |
| 1395 // (On 32-bit, PartitionAlloc adds another kSystemPageSize to the | 1371 // (On 32-bit, PartitionAlloc adds another kSystemPageSize to the |
| 1396 // allocation size before rounding, but there it marks the memory right | 1372 // allocation size before rounding, but there it marks the memory right |
| 1397 // after size as inaccessible, so it's fine to write 1 past the size we | 1373 // after size as inaccessible, so it's fine to write 1 past the size we |
| (...skipping 13 matching lines...) Expand all Loading... |
| 1411 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; | 1387 char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; |
| 1412 | 1388 |
| 1413 EXPECT_DEATH(*(charPtr - 1) = 'A', ""); | 1389 EXPECT_DEATH(*(charPtr - 1) = 'A', ""); |
| 1414 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); | 1390 EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); |
| 1415 | 1391 |
| 1416 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1392 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1417 } | 1393 } |
| 1418 | 1394 |
| 1419 // Check that a bad free() is caught where the free() refers to an unused | 1395 // Check that a bad free() is caught where the free() refers to an unused |
| 1420 // partition page of a large allocation. | 1396 // partition page of a large allocation. |
| 1421 TEST(PartitionAllocDeathTest, FreeWrongPartitionPage) { | 1397 TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) { |
| 1422 TestSetup(); | |
| 1423 | |
| 1424 // This large size will result in a direct mapped allocation with guard | 1398 // This large size will result in a direct mapped allocation with guard |
| 1425 // pages at either end. | 1399 // pages at either end. |
| 1426 void* ptr = PartitionAllocGeneric(generic_allocator.root(), | 1400 void* ptr = PartitionAllocGeneric(generic_allocator.root(), |
| 1427 kPartitionPageSize * 2, type_name); | 1401 kPartitionPageSize * 2, type_name); |
| 1428 EXPECT_TRUE(ptr); | 1402 EXPECT_TRUE(ptr); |
| 1429 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; | 1403 char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; |
| 1430 | 1404 |
| 1431 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), ""); | 1405 EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), ""); |
| 1432 | 1406 |
| 1433 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1407 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1434 } | 1408 } |
| 1435 | 1409 |
| 1436 #endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) | 1410 #endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) |
| 1437 | 1411 |
| 1438 // Tests that PartitionDumpStatsGeneric and PartitionDumpStats runs without | 1412 // Tests that |PartitionDumpStatsGeneric| and |PartitionDumpStats| run without |
| 1439 // crashing and returns non zero values when memory is allocated. | 1413 // crashing and return non-zero values when memory is allocated. |
| 1440 TEST(PartitionAllocTest, DumpMemoryStats) { | 1414 TEST_F(PartitionAllocTest, DumpMemoryStats) { |
| 1441 TestSetup(); | |
| 1442 { | 1415 { |
| 1443 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); | 1416 void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); |
| 1444 MockPartitionStatsDumper mockStatsDumper; | 1417 MockPartitionStatsDumper mockStatsDumper; |
| 1445 PartitionDumpStats(allocator.root(), "mock_allocator", | 1418 PartitionDumpStats(allocator.root(), "mock_allocator", |
| 1446 false /* detailed dump */, &mockStatsDumper); | 1419 false /* detailed dump */, &mockStatsDumper); |
| 1447 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded()); | 1420 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded()); |
| 1448 | |
| 1449 PartitionFree(ptr); | 1421 PartitionFree(ptr); |
| 1450 } | 1422 } |
| 1451 | 1423 |
| 1452 // This series of tests checks the active -> empty -> decommitted states. | 1424 // This series of tests checks the active -> empty -> decommitted states. |
| 1453 { | 1425 { |
| 1454 void* genericPtr = PartitionAllocGeneric(generic_allocator.root(), | |
| 1455 2048 - kExtraAllocSize, type_name); | |
| 1456 { | 1426 { |
| 1427 void* ptr = PartitionAllocGeneric(generic_allocator.root(), |
| 1428 2048 - kExtraAllocSize, type_name); |
| 1457 MockPartitionStatsDumper dumper; | 1429 MockPartitionStatsDumper dumper; |
| 1458 PartitionDumpStatsGeneric(generic_allocator.root(), | 1430 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1459 "mock_generic_allocator", | 1431 "mock_generic_allocator", |
| 1460 false /* detailed dump */, &dumper); | 1432 false /* detailed dump */, &dumper); |
| 1461 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); | 1433 EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); |
| 1462 | 1434 |
| 1463 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); | 1435 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1464 EXPECT_TRUE(stats); | 1436 EXPECT_TRUE(stats); |
| 1465 EXPECT_TRUE(stats->is_valid); | 1437 EXPECT_TRUE(stats->is_valid); |
| 1466 EXPECT_EQ(2048u, stats->bucket_slot_size); | 1438 EXPECT_EQ(2048u, stats->bucket_slot_size); |
| 1467 EXPECT_EQ(2048u, stats->active_bytes); | 1439 EXPECT_EQ(2048u, stats->active_bytes); |
| 1468 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); | 1440 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); |
| 1469 EXPECT_EQ(0u, stats->decommittable_bytes); | 1441 EXPECT_EQ(0u, stats->decommittable_bytes); |
| 1470 EXPECT_EQ(0u, stats->discardable_bytes); | 1442 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1471 EXPECT_EQ(0u, stats->num_full_pages); | 1443 EXPECT_EQ(0u, stats->num_full_pages); |
| 1472 EXPECT_EQ(1u, stats->num_active_pages); | 1444 EXPECT_EQ(1u, stats->num_active_pages); |
| 1473 EXPECT_EQ(0u, stats->num_empty_pages); | 1445 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1474 EXPECT_EQ(0u, stats->num_decommitted_pages); | 1446 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1447 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1475 } | 1448 } |
| 1476 | 1449 |
| 1477 PartitionFreeGeneric(generic_allocator.root(), genericPtr); | |
| 1478 | |
| 1479 { | 1450 { |
| 1480 MockPartitionStatsDumper dumper; | 1451 MockPartitionStatsDumper dumper; |
| 1481 PartitionDumpStatsGeneric(generic_allocator.root(), | 1452 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1482 "mock_generic_allocator", | 1453 "mock_generic_allocator", |
| 1483 false /* detailed dump */, &dumper); | 1454 false /* detailed dump */, &dumper); |
| 1484 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); | 1455 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1485 | 1456 |
| 1486 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); | 1457 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| 1487 EXPECT_TRUE(stats); | 1458 EXPECT_TRUE(stats); |
| 1488 EXPECT_TRUE(stats->is_valid); | 1459 EXPECT_TRUE(stats->is_valid); |
| 1489 EXPECT_EQ(2048u, stats->bucket_slot_size); | 1460 EXPECT_EQ(2048u, stats->bucket_slot_size); |
| 1490 EXPECT_EQ(0u, stats->active_bytes); | 1461 EXPECT_EQ(0u, stats->active_bytes); |
| 1491 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); | 1462 EXPECT_EQ(kSystemPageSize, stats->resident_bytes); |
| 1492 EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); | 1463 EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); |
| 1493 EXPECT_EQ(0u, stats->discardable_bytes); | 1464 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1494 EXPECT_EQ(0u, stats->num_full_pages); | 1465 EXPECT_EQ(0u, stats->num_full_pages); |
| 1495 EXPECT_EQ(0u, stats->num_active_pages); | 1466 EXPECT_EQ(0u, stats->num_active_pages); |
| 1496 EXPECT_EQ(1u, stats->num_empty_pages); | 1467 EXPECT_EQ(1u, stats->num_empty_pages); |
| 1497 EXPECT_EQ(0u, stats->num_decommitted_pages); | 1468 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1498 } | 1469 } |
| 1499 | 1470 |
| 1471 // TODO(crbug.com/722911): Commenting this out causes this test to fail when |
| 1472 // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not |
| 1473 // when run with the others (--gtest_filter=PartitionAllocTest.*). |
| 1500 CycleGenericFreeCache(kTestAllocSize); | 1474 CycleGenericFreeCache(kTestAllocSize); |
| 1501 | 1475 |
| 1502 { | 1476 { |
| 1503 MockPartitionStatsDumper dumper; | 1477 MockPartitionStatsDumper dumper; |
| 1504 PartitionDumpStatsGeneric(generic_allocator.root(), | 1478 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1505 "mock_generic_allocator", | 1479 "mock_generic_allocator", |
| 1506 false /* detailed dump */, &dumper); | 1480 false /* detailed dump */, &dumper); |
| 1507 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); | 1481 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1508 | 1482 |
| 1509 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); | 1483 const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1605 EXPECT_EQ(0u, stats->discardable_bytes); | 1579 EXPECT_EQ(0u, stats->discardable_bytes); |
| 1606 EXPECT_EQ(1u, stats->num_full_pages); | 1580 EXPECT_EQ(1u, stats->num_full_pages); |
| 1607 EXPECT_EQ(0u, stats->num_active_pages); | 1581 EXPECT_EQ(0u, stats->num_active_pages); |
| 1608 EXPECT_EQ(0u, stats->num_empty_pages); | 1582 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1609 EXPECT_EQ(0u, stats->num_decommitted_pages); | 1583 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1610 } | 1584 } |
| 1611 | 1585 |
| 1612 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 1586 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1613 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1587 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1614 | 1588 |
| 1615 // Whilst we're here, allocate again and free with different ordering | 1589 // Whilst we're here, allocate again and free with different ordering to |
| 1616 // to give a workout to our linked list code. | 1590 // give a workout to our linked list code. |
| 1617 ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, | 1591 ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, |
| 1618 type_name); | 1592 type_name); |
| 1619 ptr2 = | 1593 ptr2 = |
| 1620 PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); | 1594 PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); |
| 1621 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1595 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1622 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 1596 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1623 } | 1597 } |
| 1624 | 1598 |
| 1625 // This test checks large-but-not-quite-direct allocations. | 1599 // This test checks large-but-not-quite-direct allocations. |
| 1626 { | 1600 { |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1703 EXPECT_EQ(0u, stats->num_active_pages); | 1677 EXPECT_EQ(0u, stats->num_active_pages); |
| 1704 EXPECT_EQ(0u, stats->num_empty_pages); | 1678 EXPECT_EQ(0u, stats->num_empty_pages); |
| 1705 EXPECT_EQ(0u, stats->num_decommitted_pages); | 1679 EXPECT_EQ(0u, stats->num_decommitted_pages); |
| 1706 } | 1680 } |
| 1707 | 1681 |
| 1708 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 1682 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1709 } | 1683 } |
| 1710 } | 1684 } |
| 1711 | 1685 |
| 1712 // Tests the API to purge freeable memory. | 1686 // Tests the API to purge freeable memory. |
| 1713 TEST(PartitionAllocTest, Purge) { | 1687 TEST_F(PartitionAllocTest, Purge) { |
| 1714 TestSetup(); | |
| 1715 | |
| 1716 char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric( | 1688 char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric( |
| 1717 generic_allocator.root(), 2048 - kExtraAllocSize, type_name)); | 1689 generic_allocator.root(), 2048 - kExtraAllocSize, type_name)); |
| 1718 PartitionFreeGeneric(generic_allocator.root(), ptr); | 1690 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 1719 { | 1691 { |
| 1720 MockPartitionStatsDumper dumper; | 1692 MockPartitionStatsDumper dumper; |
| 1721 PartitionDumpStatsGeneric(generic_allocator.root(), | 1693 PartitionDumpStatsGeneric(generic_allocator.root(), |
| 1722 "mock_generic_allocator", | 1694 "mock_generic_allocator", |
| 1723 false /* detailed dump */, &dumper); | 1695 false /* detailed dump */, &dumper); |
| 1724 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); | 1696 EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); |
| 1725 | 1697 |
| (...skipping 29 matching lines...) Expand all Loading... |
| 1755 PartitionPurgeMemoryGeneric(generic_allocator.root(), | 1727 PartitionPurgeMemoryGeneric(generic_allocator.root(), |
| 1756 PartitionPurgeDecommitEmptyPages); | 1728 PartitionPurgeDecommitEmptyPages); |
| 1757 | 1729 |
| 1758 CheckPageInCore(ptr - kPointerOffset, false); | 1730 CheckPageInCore(ptr - kPointerOffset, false); |
| 1759 CheckPageInCore(bigPtr - kPointerOffset, false); | 1731 CheckPageInCore(bigPtr - kPointerOffset, false); |
| 1760 } | 1732 } |
| 1761 | 1733 |
| 1762 // Tests that we prefer to allocate into a non-empty partition page over an | 1734 // Tests that we prefer to allocate into a non-empty partition page over an |
| 1763 // empty one. This is an important aspect of minimizing memory usage for some | 1735 // empty one. This is an important aspect of minimizing memory usage for some |
| 1764 // allocation sizes, particularly larger ones. | 1736 // allocation sizes, particularly larger ones. |
| 1765 TEST(PartitionAllocTest, PreferActiveOverEmpty) { | 1737 TEST_F(PartitionAllocTest, PreferActiveOverEmpty) { |
| 1766 TestSetup(); | |
| 1767 | |
| 1768 size_t size = (kSystemPageSize * 2) - kExtraAllocSize; | 1738 size_t size = (kSystemPageSize * 2) - kExtraAllocSize; |
| 1769 // Allocate 3 full slot spans worth of 8192-byte allocations. | 1739 // Allocate 3 full slot spans worth of 8192-byte allocations. |
| 1770 // Each slot span for this size is 16384 bytes, or 1 partition page and 2 | 1740 // Each slot span for this size is 16384 bytes, or 1 partition page and 2 |
| 1771 // slots. | 1741 // slots. |
| 1772 void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1742 void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1773 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1743 void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1774 void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1744 void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1775 void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1745 void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1776 void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1746 void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1777 void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1747 void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| (...skipping 28 matching lines...) Expand all Loading... |
| 1806 // empty. We used to fail that. | 1776 // empty. We used to fail that. |
| 1807 void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); | 1777 void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); |
| 1808 EXPECT_EQ(ptr6, ptr7); | 1778 EXPECT_EQ(ptr6, ptr7); |
| 1809 EXPECT_EQ(page3, bucket->active_pages_head); | 1779 EXPECT_EQ(page3, bucket->active_pages_head); |
| 1810 | 1780 |
| 1811 PartitionFreeGeneric(generic_allocator.root(), ptr5); | 1781 PartitionFreeGeneric(generic_allocator.root(), ptr5); |
| 1812 PartitionFreeGeneric(generic_allocator.root(), ptr7); | 1782 PartitionFreeGeneric(generic_allocator.root(), ptr7); |
| 1813 } | 1783 } |
| 1814 | 1784 |
| 1815 // Tests the API to purge discardable memory. | 1785 // Tests the API to purge discardable memory. |
| 1816 TEST(PartitionAllocTest, PurgeDiscardable) { | 1786 TEST_F(PartitionAllocTest, PurgeDiscardable) { |
| 1817 TestSetup(); | |
| 1818 | |
| 1819 // Free the second of two 4096 byte allocations and then purge. | 1787 // Free the second of two 4096 byte allocations and then purge. |
| 1820 { | 1788 { |
| 1821 void* ptr1 = PartitionAllocGeneric( | 1789 void* ptr1 = PartitionAllocGeneric( |
| 1822 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); | 1790 generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); |
| 1823 char* ptr2 = reinterpret_cast<char*>( | 1791 char* ptr2 = reinterpret_cast<char*>( |
| 1824 PartitionAllocGeneric(generic_allocator.root(), | 1792 PartitionAllocGeneric(generic_allocator.root(), |
| 1825 kSystemPageSize - kExtraAllocSize, type_name)); | 1793 kSystemPageSize - kExtraAllocSize, type_name)); |
| 1826 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 1794 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 1827 PartitionPage* page = | 1795 PartitionPage* page = |
| 1828 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); | 1796 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); |
| (...skipping 252 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2081 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false); | 2049 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false); |
| 2082 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); | 2050 CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); |
| 2083 | 2051 |
| 2084 EXPECT_FALSE(page->freelist_head); | 2052 EXPECT_FALSE(page->freelist_head); |
| 2085 | 2053 |
| 2086 PartitionFreeGeneric(generic_allocator.root(), ptr1); | 2054 PartitionFreeGeneric(generic_allocator.root(), ptr1); |
| 2087 PartitionFreeGeneric(generic_allocator.root(), ptr2); | 2055 PartitionFreeGeneric(generic_allocator.root(), ptr2); |
| 2088 } | 2056 } |
| 2089 } | 2057 } |
| 2090 | 2058 |
| 2091 TEST(PartitionAllocTest, ReallocMovesCookies) { | 2059 TEST_F(PartitionAllocTest, ReallocMovesCookies) { |
| 2092 TestSetup(); | |
| 2093 | |
| 2094 // Resize so as to be sure to hit a "resize in place" case, and ensure that | 2060 // Resize so as to be sure to hit a "resize in place" case, and ensure that |
| 2095 // use of the entire result is compatible with the debug mode's cookies, even | 2061 // use of the entire result is compatible with the debug mode's cookies, even |
| 2096 // when the bucket size is large enough to span more than one partition page | 2062 // when the bucket size is large enough to span more than one partition page |
| 2097 // and we can track the "raw" size. See https://crbug.com/709271 | 2063 // and we can track the "raw" size. See https://crbug.com/709271 |
| 2098 const size_t kSize = base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize; | 2064 const size_t kSize = base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize; |
| 2099 void* ptr = | 2065 void* ptr = |
| 2100 PartitionAllocGeneric(generic_allocator.root(), kSize + 1, type_name); | 2066 PartitionAllocGeneric(generic_allocator.root(), kSize + 1, type_name); |
| 2101 EXPECT_TRUE(ptr); | 2067 EXPECT_TRUE(ptr); |
| 2102 | 2068 |
| 2103 memset(ptr, 0xbd, kSize + 1); | 2069 memset(ptr, 0xbd, kSize + 1); |
| 2104 ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 2, | 2070 ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 2, |
| 2105 type_name); | 2071 type_name); |
| 2106 EXPECT_TRUE(ptr); | 2072 EXPECT_TRUE(ptr); |
| 2107 | 2073 |
| 2108 memset(ptr, 0xbd, kSize + 2); | 2074 memset(ptr, 0xbd, kSize + 2); |
| 2109 PartitionFreeGeneric(generic_allocator.root(), ptr); | 2075 PartitionFreeGeneric(generic_allocator.root(), ptr); |
| 2110 } | 2076 } |
| 2111 | 2077 |
| 2112 } // namespace base | 2078 } // namespace base |
| 2113 | 2079 |
| 2114 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 2080 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| OLD | NEW |