| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/allocator/partition_allocator/partition_alloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
| 6 | 6 |
| 7 #include <string.h> | 7 #include <string.h> |
| 8 | 8 |
| 9 #include "base/allocator/oom.h" | 9 #include "base/allocator/oom.h" |
| 10 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
| (...skipping 27 matching lines...) Expand all Loading... |
| 38 static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8), | 38 static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8), |
| 39 "System pages per slot span must be less than 128."); | 39 "System pages per slot span must be less than 128."); |
| 40 | 40 |
| 41 namespace base { | 41 namespace base { |
| 42 | 42 |
| 43 subtle::SpinLock PartitionRootBase::gInitializedLock; | 43 subtle::SpinLock PartitionRootBase::gInitializedLock; |
| 44 bool PartitionRootBase::gInitialized = false; | 44 bool PartitionRootBase::gInitialized = false; |
| 45 PartitionPage PartitionRootBase::gSeedPage; | 45 PartitionPage PartitionRootBase::gSeedPage; |
| 46 PartitionBucket PartitionRootBase::gPagedBucket; | 46 PartitionBucket PartitionRootBase::gPagedBucket; |
| 47 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | 47 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; |
| 48 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = | 48 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ = |
| 49 nullptr; | 49 nullptr; |
| 50 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | 50 PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr; |
| 51 | 51 |
| 52 static uint8_t partitionBucketNumSystemPages(size_t size) { | 52 static uint8_t PartitionBucketNumSystemPages(size_t size) { |
| 53 // This works out reasonably for the current bucket sizes of the generic | 53 // This works out reasonably for the current bucket sizes of the generic |
| 54 // allocator, and the current values of partition page size and constants. | 54 // allocator, and the current values of partition page size and constants. |
| 55 // Specifically, we have enough room to always pack the slots perfectly into | 55 // Specifically, we have enough room to always pack the slots perfectly into |
| 56 // some number of system pages. The only waste is the waste associated with | 56 // some number of system pages. The only waste is the waste associated with |
| 57 // unfaulted pages (i.e. wasted address space). | 57 // unfaulted pages (i.e. wasted address space). |
| 58 // TODO: we end up using a lot of system pages for very small sizes. For | 58 // TODO: we end up using a lot of system pages for very small sizes. For |
| 59 // example, we'll use 12 system pages for slot size 24. The slot size is | 59 // example, we'll use 12 system pages for slot size 24. The slot size is |
| 60 // so small that the waste would be tiny with just 4, or 1, system pages. | 60 // so small that the waste would be tiny with just 4, or 1, system pages. |
| 61 // Later, we can investigate whether there are anti-fragmentation benefits | 61 // Later, we can investigate whether there are anti-fragmentation benefits |
| 62 // to using fewer system pages. | 62 // to using fewer system pages. |
| 63 double bestWasteRatio = 1.0f; | 63 double best_waste_ratio = 1.0f; |
| 64 uint16_t bestPages = 0; | 64 uint16_t best_pages = 0; |
| 65 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { | 65 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { |
| 66 DCHECK(!(size % kSystemPageSize)); | 66 DCHECK(!(size % kSystemPageSize)); |
| 67 bestPages = static_cast<uint16_t>(size / kSystemPageSize); | 67 best_pages = static_cast<uint16_t>(size / kSystemPageSize); |
| 68 CHECK(bestPages < (1 << 8)); | 68 CHECK(best_pages < (1 << 8)); |
| 69 return static_cast<uint8_t>(bestPages); | 69 return static_cast<uint8_t>(best_pages); |
| 70 } | 70 } |
| 71 DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | 71 DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); |
| 72 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; | 72 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; |
| 73 i <= kMaxSystemPagesPerSlotSpan; ++i) { | 73 i <= kMaxSystemPagesPerSlotSpan; ++i) { |
| 74 size_t pageSize = kSystemPageSize * i; | 74 size_t page_size = kSystemPageSize * i; |
| 75 size_t numSlots = pageSize / size; | 75 size_t num_slots = page_size / size; |
| 76 size_t waste = pageSize - (numSlots * size); | 76 size_t waste = page_size - (num_slots * size); |
| 77 // Leaving a page unfaulted is not free; the page will occupy an empty page | 77 // Leaving a page unfaulted is not free; the page will occupy an empty page |
| 78 // table entry. Make a simple attempt to account for that. | 78 // table entry. Make a simple attempt to account for that. |
| 79 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | 79 size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1); |
| 80 size_t numUnfaultedPages = | 80 size_t num_unfaulted_pages = |
| 81 numRemainderPages | 81 num_remainder_pages |
| 82 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) | 82 ? (kNumSystemPagesPerPartitionPage - num_remainder_pages) |
| 83 : 0; | 83 : 0; |
| 84 waste += sizeof(void*) * numUnfaultedPages; | 84 waste += sizeof(void*) * num_unfaulted_pages; |
| 85 double wasteRatio = (double)waste / (double)pageSize; | 85 double waste_ratio = (double)waste / (double)page_size; |
| 86 if (wasteRatio < bestWasteRatio) { | 86 if (waste_ratio < best_waste_ratio) { |
| 87 bestWasteRatio = wasteRatio; | 87 best_waste_ratio = waste_ratio; |
| 88 bestPages = i; | 88 best_pages = i; |
| 89 } | 89 } |
| 90 } | 90 } |
| 91 DCHECK(bestPages > 0); | 91 DCHECK(best_pages > 0); |
| 92 CHECK(bestPages <= kMaxSystemPagesPerSlotSpan); | 92 CHECK(best_pages <= kMaxSystemPagesPerSlotSpan); |
| 93 return static_cast<uint8_t>(bestPages); | 93 return static_cast<uint8_t>(best_pages); |
| 94 } | 94 } |
| 95 | 95 |
| 96 static void partitionAllocBaseInit(PartitionRootBase* root) { | 96 static void PartitionAllocBaseInit(PartitionRootBase* root) { |
| 97 DCHECK(!root->initialized); | 97 DCHECK(!root->initialized); |
| 98 { | 98 { |
| 99 subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock); | 99 subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock); |
| 100 if (!PartitionRootBase::gInitialized) { | 100 if (!PartitionRootBase::gInitialized) { |
| 101 PartitionRootBase::gInitialized = true; | 101 PartitionRootBase::gInitialized = true; |
| 102 // We mark the seed page as free to make sure it is skipped by our | 102 // We mark the seed page as free to make sure it is skipped by our |
| 103 // logic to find a new active page. | 103 // logic to find a new active page. |
| 104 PartitionRootBase::gPagedBucket.activePagesHead = | 104 PartitionRootBase::gPagedBucket.active_pages_head = |
| 105 &PartitionRootGeneric::gSeedPage; | 105 &PartitionRootGeneric::gSeedPage; |
| 106 } | 106 } |
| 107 } | 107 } |
| 108 | 108 |
| 109 root->initialized = true; | 109 root->initialized = true; |
| 110 root->totalSizeOfCommittedPages = 0; | 110 root->total_size_of_committed_pages = 0; |
| 111 root->totalSizeOfSuperPages = 0; | 111 root->total_size_of_super_pages = 0; |
| 112 root->totalSizeOfDirectMappedPages = 0; | 112 root->total_size_of_direct_mapped_pages = 0; |
| 113 root->nextSuperPage = 0; | 113 root->next_super_page = 0; |
| 114 root->nextPartitionPage = 0; | 114 root->next_partition_page = 0; |
| 115 root->nextPartitionPageEnd = 0; | 115 root->next_partition_page_end = 0; |
| 116 root->firstExtent = 0; | 116 root->first_extent = 0; |
| 117 root->currentExtent = 0; | 117 root->current_extent = 0; |
| 118 root->directMapList = 0; | 118 root->direct_map_list = 0; |
| 119 | 119 |
| 120 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); | 120 memset(&root->global_empty_page_ring, '\0', |
| 121 root->globalEmptyPageRingIndex = 0; | 121 sizeof(root->global_empty_page_ring)); |
| 122 root->global_empty_page_ring_index = 0; |
| 122 | 123 |
| 123 // This is a "magic" value so we can test if a root pointer is valid. | 124 // This is a "magic" value so we can test if a root pointer is valid. |
| 124 root->invertedSelf = ~reinterpret_cast<uintptr_t>(root); | 125 root->inverted_self = ~reinterpret_cast<uintptr_t>(root); |
| 125 } | 126 } |
| 126 | 127 |
| 127 static void partitionBucketInitBase(PartitionBucket* bucket, | 128 static void PartitionBucketInitBase(PartitionBucket* bucket, |
| 128 PartitionRootBase* root) { | 129 PartitionRootBase* root) { |
| 129 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 130 bucket->active_pages_head = &PartitionRootGeneric::gSeedPage; |
| 130 bucket->emptyPagesHead = 0; | 131 bucket->empty_pages_head = 0; |
| 131 bucket->decommittedPagesHead = 0; | 132 bucket->decommitted_pages_head = 0; |
| 132 bucket->numFullPages = 0; | 133 bucket->num_full_pages = 0; |
| 133 bucket->numSystemPagesPerSlotSpan = | 134 bucket->num_system_pages_per_slot_span = |
| 134 partitionBucketNumSystemPages(bucket->slotSize); | 135 PartitionBucketNumSystemPages(bucket->slot_size); |
| 135 } | 136 } |
| 136 | 137 |
| 137 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { | 138 void PartitionAllocGlobalInit(void (*oom_handling_function)()) { |
| 138 DCHECK(oomHandlingFunction); | 139 DCHECK(oom_handling_function); |
| 139 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; | 140 PartitionRootBase::gOomHandlingFunction = oom_handling_function; |
| 140 } | 141 } |
| 141 | 142 |
| 142 void partitionAllocInit(PartitionRoot* root, | 143 void PartitionAllocInit(PartitionRoot* root, |
| 143 size_t numBuckets, | 144 size_t num_buckets, |
| 144 size_t maxAllocation) { | 145 size_t max_allocation) { |
| 145 partitionAllocBaseInit(root); | 146 PartitionAllocBaseInit(root); |
| 146 | 147 |
| 147 root->numBuckets = numBuckets; | 148 root->num_buckets = num_buckets; |
| 148 root->maxAllocation = maxAllocation; | 149 root->max_allocation = max_allocation; |
| 149 size_t i; | 150 size_t i; |
| 150 for (i = 0; i < root->numBuckets; ++i) { | 151 for (i = 0; i < root->num_buckets; ++i) { |
| 151 PartitionBucket* bucket = &root->buckets()[i]; | 152 PartitionBucket* bucket = &root->buckets()[i]; |
| 152 if (!i) | 153 if (!i) |
| 153 bucket->slotSize = kAllocationGranularity; | 154 bucket->slot_size = kAllocationGranularity; |
| 154 else | 155 else |
| 155 bucket->slotSize = i << kBucketShift; | 156 bucket->slot_size = i << kBucketShift; |
| 156 partitionBucketInitBase(bucket, root); | 157 PartitionBucketInitBase(bucket, root); |
| 157 } | 158 } |
| 158 } | 159 } |
| 159 | 160 |
| 160 void partitionAllocGenericInit(PartitionRootGeneric* root) { | 161 void PartitionAllocGenericInit(PartitionRootGeneric* root) { |
| 161 subtle::SpinLock::Guard guard(root->lock); | 162 subtle::SpinLock::Guard guard(root->lock); |
| 162 | 163 |
| 163 partitionAllocBaseInit(root); | 164 PartitionAllocBaseInit(root); |
| 164 | 165 |
| 165 // Precalculate some shift and mask constants used in the hot path. | 166 // Precalculate some shift and mask constants used in the hot path. |
| 166 // Example: malloc(41) == 101001 binary. | 167 // Example: malloc(41) == 101001 binary. |
| 167 // Order is 6 (1 << 6-1)==32 is highest bit set. | 168 // Order is 6 (1 << 6-1) == 32 is highest bit set. |
| 168 // orderIndex is the next three MSB == 010 == 2. | 169 // order_index is the next three MSB == 010 == 2. |
| 169 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for | 170 // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01 |
| 170 // the subOrderIndex). | 171 // for |
| 172 // the sub_order_index). |
| 171 size_t order; | 173 size_t order; |
| 172 for (order = 0; order <= kBitsPerSizet; ++order) { | 174 for (order = 0; order <= kBitsPerSizeT; ++order) { |
| 173 size_t orderIndexShift; | 175 size_t order_index_shift; |
| 174 if (order < kGenericNumBucketsPerOrderBits + 1) | 176 if (order < kGenericNumBucketsPerOrderBits + 1) |
| 175 orderIndexShift = 0; | 177 order_index_shift = 0; |
| 176 else | 178 else |
| 177 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); | 179 order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1); |
| 178 root->orderIndexShifts[order] = orderIndexShift; | 180 root->order_index_shifts[order] = order_index_shift; |
| 179 size_t subOrderIndexMask; | 181 size_t sub_order_index_mask; |
| 180 if (order == kBitsPerSizet) { | 182 if (order == kBitsPerSizeT) { |
| 181 // This avoids invoking undefined behavior for an excessive shift. | 183 // This avoids invoking undefined behavior for an excessive shift. |
| 182 subOrderIndexMask = | 184 sub_order_index_mask = |
| 183 static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); | 185 static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); |
| 184 } else { | 186 } else { |
| 185 subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> | 187 sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >> |
| 186 (kGenericNumBucketsPerOrderBits + 1); | 188 (kGenericNumBucketsPerOrderBits + 1); |
| 187 } | 189 } |
| 188 root->orderSubIndexMasks[order] = subOrderIndexMask; | 190 root->order_sub_index_masks[order] = sub_order_index_mask; |
| 189 } | 191 } |
| 190 | 192 |
| 191 // Set up the actual usable buckets first. | 193 // Set up the actual usable buckets first. |
| 192 // Note that typical values (i.e. min allocation size of 8) will result in | 194 // Note that typical values (i.e. min allocation size of 8) will result in |
| 193 // pseudo buckets (size==9 etc. or more generally, size is not a multiple | 195 // pseudo buckets (size==9 etc. or more generally, size is not a multiple |
| 194 // of the smallest allocation granularity). | 196 // of the smallest allocation granularity). |
| 195 // We avoid them in the bucket lookup map, but we tolerate them to keep the | 197 // We avoid them in the bucket lookup map, but we tolerate them to keep the |
| 196 // code simpler and the structures more generic. | 198 // code simpler and the structures more generic. |
| 197 size_t i, j; | 199 size_t i, j; |
| 198 size_t currentSize = kGenericSmallestBucket; | 200 size_t current_size = kGenericSmallestBucket; |
| 199 size_t currentIncrement = | 201 size_t currentIncrement = |
| 200 kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; | 202 kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; |
| 201 PartitionBucket* bucket = &root->buckets[0]; | 203 PartitionBucket* bucket = &root->buckets[0]; |
| 202 for (i = 0; i < kGenericNumBucketedOrders; ++i) { | 204 for (i = 0; i < kGenericNumBucketedOrders; ++i) { |
| 203 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 205 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
| 204 bucket->slotSize = currentSize; | 206 bucket->slot_size = current_size; |
| 205 partitionBucketInitBase(bucket, root); | 207 PartitionBucketInitBase(bucket, root); |
| 206 // Disable psuedo buckets so that touching them faults. | 208 // Disable psuedo buckets so that touching them faults. |
| 207 if (currentSize % kGenericSmallestBucket) | 209 if (current_size % kGenericSmallestBucket) |
| 208 bucket->activePagesHead = 0; | 210 bucket->active_pages_head = 0; |
| 209 currentSize += currentIncrement; | 211 current_size += currentIncrement; |
| 210 ++bucket; | 212 ++bucket; |
| 211 } | 213 } |
| 212 currentIncrement <<= 1; | 214 currentIncrement <<= 1; |
| 213 } | 215 } |
| 214 DCHECK(currentSize == 1 << kGenericMaxBucketedOrder); | 216 DCHECK(current_size == 1 << kGenericMaxBucketedOrder); |
| 215 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); | 217 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 216 | 218 |
| 217 // Then set up the fast size -> bucket lookup table. | 219 // Then set up the fast size -> bucket lookup table. |
| 218 bucket = &root->buckets[0]; | 220 bucket = &root->buckets[0]; |
| 219 PartitionBucket** bucketPtr = &root->bucketLookups[0]; | 221 PartitionBucket** bucketPtr = &root->bucket_lookups[0]; |
| 220 for (order = 0; order <= kBitsPerSizet; ++order) { | 222 for (order = 0; order <= kBitsPerSizeT; ++order) { |
| 221 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 223 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
| 222 if (order < kGenericMinBucketedOrder) { | 224 if (order < kGenericMinBucketedOrder) { |
| 223 // Use the bucket of the finest granularity for malloc(0) etc. | 225 // Use the bucket of the finest granularity for malloc(0) etc. |
| 224 *bucketPtr++ = &root->buckets[0]; | 226 *bucketPtr++ = &root->buckets[0]; |
| 225 } else if (order > kGenericMaxBucketedOrder) { | 227 } else if (order > kGenericMaxBucketedOrder) { |
| 226 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; | 228 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; |
| 227 } else { | 229 } else { |
| 228 PartitionBucket* validBucket = bucket; | 230 PartitionBucket* validBucket = bucket; |
| 229 // Skip over invalid buckets. | 231 // Skip over invalid buckets. |
| 230 while (validBucket->slotSize % kGenericSmallestBucket) | 232 while (validBucket->slot_size % kGenericSmallestBucket) |
| 231 validBucket++; | 233 validBucket++; |
| 232 *bucketPtr++ = validBucket; | 234 *bucketPtr++ = validBucket; |
| 233 bucket++; | 235 bucket++; |
| 234 } | 236 } |
| 235 } | 237 } |
| 236 } | 238 } |
| 237 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); | 239 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 238 DCHECK(bucketPtr == | 240 DCHECK(bucketPtr == |
| 239 &root->bucketLookups[0] + | 241 &root->bucket_lookups[0] + |
| 240 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | 242 ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder)); |
| 241 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | 243 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), |
| 242 // which tries to overflow to a non-existant order. | 244 // which tries to overflow to a non-existant order. |
| 243 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 245 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
| 244 } | 246 } |
| 245 | 247 |
| 246 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { | 248 static bool PartitionAllocShutdownBucket(PartitionBucket* bucket) { |
| 247 // Failure here indicates a memory leak. | 249 // Failure here indicates a memory leak. |
| 248 bool foundLeak = bucket->numFullPages != 0; | 250 bool foundLeak = bucket->num_full_pages != 0; |
| 249 for (PartitionPage* page = bucket->activePagesHead; page; | 251 for (PartitionPage* page = bucket->active_pages_head; page; |
| 250 page = page->nextPage) | 252 page = page->next_page) |
| 251 foundLeak |= (page->numAllocatedSlots > 0); | 253 foundLeak |= (page->num_allocated_slots > 0); |
| 252 return foundLeak; | 254 return foundLeak; |
| 253 } | 255 } |
| 254 | 256 |
| 255 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { | 257 static bool PartitionAllocBaseShutdown(PartitionRootBase* root) { |
| 256 DCHECK(root->initialized); | 258 DCHECK(root->initialized); |
| 257 root->initialized = false; | 259 root->initialized = false; |
| 258 | 260 |
| 259 // Now that we've examined all partition pages in all buckets, it's safe | 261 // Now that we've examined all partition pages in all buckets, it's safe |
| 260 // to free all our super pages. Since the super page extent entries are | 262 // to free all our super pages. Since the super page extent entries are |
| 261 // stored in the super pages, we need to be careful not to access them | 263 // stored in the super pages, we need to be careful not to access them |
| 262 // after we've released the corresponding super page. | 264 // after we've released the corresponding super page. |
| 263 PartitionSuperPageExtentEntry* entry = root->firstExtent; | 265 PartitionSuperPageExtentEntry* entry = root->first_extent; |
| 264 while (entry) { | 266 while (entry) { |
| 265 PartitionSuperPageExtentEntry* nextEntry = entry->next; | 267 PartitionSuperPageExtentEntry* nextEntry = entry->next; |
| 266 char* superPage = entry->superPageBase; | 268 char* super_page = entry->super_page_base; |
| 267 char* superPagesEnd = entry->superPagesEnd; | 269 char* super_pages_end = entry->super_pages_end; |
| 268 while (superPage < superPagesEnd) { | 270 while (super_page < super_pages_end) { |
| 269 freePages(superPage, kSuperPageSize); | 271 FreePages(super_page, kSuperPageSize); |
| 270 superPage += kSuperPageSize; | 272 super_page += kSuperPageSize; |
| 271 } | 273 } |
| 272 entry = nextEntry; | 274 entry = nextEntry; |
| 273 } | 275 } |
| 274 return root->directMapList != nullptr; | 276 return root->direct_map_list != nullptr; |
| 275 } | 277 } |
| 276 | 278 |
| 277 bool partitionAllocShutdown(PartitionRoot* root) { | 279 bool PartitionAllocShutdown(PartitionRoot* root) { |
| 278 bool foundLeak = false; | 280 bool foundLeak = false; |
| 279 size_t i; | 281 size_t i; |
| 280 for (i = 0; i < root->numBuckets; ++i) { | 282 for (i = 0; i < root->num_buckets; ++i) { |
| 281 PartitionBucket* bucket = &root->buckets()[i]; | 283 PartitionBucket* bucket = &root->buckets()[i]; |
| 282 foundLeak |= partitionAllocShutdownBucket(bucket); | 284 foundLeak |= PartitionAllocShutdownBucket(bucket); |
| 283 } | 285 } |
| 284 foundLeak |= partitionAllocBaseShutdown(root); | 286 foundLeak |= PartitionAllocBaseShutdown(root); |
| 285 return !foundLeak; | 287 return !foundLeak; |
| 286 } | 288 } |
| 287 | 289 |
| 288 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { | 290 bool PartitionAllocGenericShutdown(PartitionRootGeneric* root) { |
| 289 subtle::SpinLock::Guard guard(root->lock); | 291 subtle::SpinLock::Guard guard(root->lock); |
| 290 bool foundLeak = false; | 292 bool foundLeak = false; |
| 291 size_t i; | 293 size_t i; |
| 292 for (i = 0; i < kGenericNumBuckets; ++i) { | 294 for (i = 0; i < kGenericNumBuckets; ++i) { |
| 293 PartitionBucket* bucket = &root->buckets[i]; | 295 PartitionBucket* bucket = &root->buckets[i]; |
| 294 foundLeak |= partitionAllocShutdownBucket(bucket); | 296 foundLeak |= PartitionAllocShutdownBucket(bucket); |
| 295 } | 297 } |
| 296 foundLeak |= partitionAllocBaseShutdown(root); | 298 foundLeak |= PartitionAllocBaseShutdown(root); |
| 297 return !foundLeak; | 299 return !foundLeak; |
| 298 } | 300 } |
| 299 | 301 |
| 300 #if !defined(ARCH_CPU_64_BITS) | 302 #if !defined(ARCH_CPU_64_BITS) |
| 301 static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { | 303 static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { |
| 302 OOM_CRASH(); | 304 OOM_CRASH(); |
| 303 } | 305 } |
| 304 #endif | 306 #endif |
| 305 | 307 |
| 306 static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) { | 308 static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) { |
| 307 #if !defined(ARCH_CPU_64_BITS) | 309 #if !defined(ARCH_CPU_64_BITS) |
| 308 // Check whether this OOM is due to a lot of super pages that are allocated | 310 // Check whether this OOM is due to a lot of super pages that are allocated |
| 309 // but not committed, probably due to http://crbug.com/421387. | 311 // but not committed, probably due to http://crbug.com/421387. |
| 310 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - | 312 if (root->total_size_of_super_pages + |
| 311 root->totalSizeOfCommittedPages > | 313 root->total_size_of_direct_mapped_pages - |
| 314 root->total_size_of_committed_pages > |
| 312 kReasonableSizeOfUnusedPages) { | 315 kReasonableSizeOfUnusedPages) { |
| 313 partitionOutOfMemoryWithLotsOfUncommitedPages(); | 316 partitionOutOfMemoryWithLotsOfUncommitedPages(); |
| 314 } | 317 } |
| 315 #endif | 318 #endif |
| 316 if (PartitionRootBase::gOomHandlingFunction) | 319 if (PartitionRootBase::gOomHandlingFunction) |
| 317 (*PartitionRootBase::gOomHandlingFunction)(); | 320 (*PartitionRootBase::gOomHandlingFunction)(); |
| 318 OOM_CRASH(); | 321 OOM_CRASH(); |
| 319 } | 322 } |
| 320 | 323 |
| 321 static NOINLINE void partitionExcessiveAllocationSize() { | 324 static NOINLINE void partitionExcessiveAllocationSize() { |
| 322 OOM_CRASH(); | 325 OOM_CRASH(); |
| 323 } | 326 } |
| 324 | 327 |
| 325 static NOINLINE void partitionBucketFull() { | 328 static NOINLINE void partitionBucketFull() { |
| 326 OOM_CRASH(); | 329 OOM_CRASH(); |
| 327 } | 330 } |
| 328 | 331 |
| 329 // partitionPageStateIs* | 332 // partitionPageStateIs* |
| 330 // Note that it's only valid to call these functions on pages found on one of | 333 // Note that it's only valid to call these functions on pages found on one of |
| 331 // the page lists. Specifically, you can't call these functions on full pages | 334 // the page lists. Specifically, you can't call these functions on full pages |
| 332 // that were detached from the active list. | 335 // that were detached from the active list. |
| 333 static bool ALWAYS_INLINE | 336 static bool ALWAYS_INLINE |
| 334 partitionPageStateIsActive(const PartitionPage* page) { | 337 PartitionPageStateIsActive(const PartitionPage* page) { |
| 335 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 338 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 336 DCHECK(!page->pageOffset); | 339 DCHECK(!page->page_offset); |
| 337 return (page->numAllocatedSlots > 0 && | 340 return (page->num_allocated_slots > 0 && |
| 338 (page->freelistHead || page->numUnprovisionedSlots)); | 341 (page->freelist_head || page->num_unprovisioned_slots)); |
| 339 } | 342 } |
| 340 | 343 |
| 341 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { | 344 static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) { |
| 342 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 345 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 343 DCHECK(!page->pageOffset); | 346 DCHECK(!page->page_offset); |
| 344 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); | 347 bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket)); |
| 345 if (ret) { | 348 if (ret) { |
| 346 DCHECK(!page->freelistHead); | 349 DCHECK(!page->freelist_head); |
| 347 DCHECK(!page->numUnprovisionedSlots); | 350 DCHECK(!page->num_unprovisioned_slots); |
| 348 } | 351 } |
| 349 return ret; | 352 return ret; |
| 350 } | 353 } |
| 351 | 354 |
| 352 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { | 355 static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) { |
| 353 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 356 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 354 DCHECK(!page->pageOffset); | 357 DCHECK(!page->page_offset); |
| 355 return (!page->numAllocatedSlots && page->freelistHead); | 358 return (!page->num_allocated_slots && page->freelist_head); |
| 356 } | 359 } |
| 357 | 360 |
| 358 static bool ALWAYS_INLINE | 361 static bool ALWAYS_INLINE |
| 359 partitionPageStateIsDecommitted(const PartitionPage* page) { | 362 PartitionPageStateIsDecommitted(const PartitionPage* page) { |
| 360 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 363 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 361 DCHECK(!page->pageOffset); | 364 DCHECK(!page->page_offset); |
| 362 bool ret = (!page->numAllocatedSlots && !page->freelistHead); | 365 bool ret = (!page->num_allocated_slots && !page->freelist_head); |
| 363 if (ret) { | 366 if (ret) { |
| 364 DCHECK(!page->numUnprovisionedSlots); | 367 DCHECK(!page->num_unprovisioned_slots); |
| 365 DCHECK(page->emptyCacheIndex == -1); | 368 DCHECK(page->empty_cache_index == -1); |
| 366 } | 369 } |
| 367 return ret; | 370 return ret; |
| 368 } | 371 } |
| 369 | 372 |
| 370 static void partitionIncreaseCommittedPages(PartitionRootBase* root, | 373 static void partitionIncreaseCommittedPages(PartitionRootBase* root, |
| 371 size_t len) { | 374 size_t len) { |
| 372 root->totalSizeOfCommittedPages += len; | 375 root->total_size_of_committed_pages += len; |
| 373 DCHECK(root->totalSizeOfCommittedPages <= | 376 DCHECK(root->total_size_of_committed_pages <= |
| 374 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 377 root->total_size_of_super_pages + |
| 378 root->total_size_of_direct_mapped_pages); |
| 375 } | 379 } |
| 376 | 380 |
| 377 static void partitionDecreaseCommittedPages(PartitionRootBase* root, | 381 static void partitionDecreaseCommittedPages(PartitionRootBase* root, |
| 378 size_t len) { | 382 size_t len) { |
| 379 root->totalSizeOfCommittedPages -= len; | 383 root->total_size_of_committed_pages -= len; |
| 380 DCHECK(root->totalSizeOfCommittedPages <= | 384 DCHECK(root->total_size_of_committed_pages <= |
| 381 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 385 root->total_size_of_super_pages + |
| 386 root->total_size_of_direct_mapped_pages); |
| 382 } | 387 } |
| 383 | 388 |
| 384 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, | 389 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, |
| 385 void* addr, | 390 void* address, |
| 386 size_t len) { | 391 size_t length) { |
| 387 decommitSystemPages(addr, len); | 392 DecommitSystemPages(address, length); |
| 388 partitionDecreaseCommittedPages(root, len); | 393 partitionDecreaseCommittedPages(root, length); |
| 389 } | 394 } |
| 390 | 395 |
| 391 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, | 396 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, |
| 392 void* addr, | 397 void* address, |
| 393 size_t len) { | 398 size_t length) { |
| 394 recommitSystemPages(addr, len); | 399 RecommitSystemPages(address, length); |
| 395 partitionIncreaseCommittedPages(root, len); | 400 partitionIncreaseCommittedPages(root, length); |
| 396 } | 401 } |
| 397 | 402 |
| 398 static ALWAYS_INLINE void* partitionAllocPartitionPages( | 403 static ALWAYS_INLINE void* PartitionAllocPartitionPages( |
| 399 PartitionRootBase* root, | 404 PartitionRootBase* root, |
| 400 int flags, | 405 int flags, |
| 401 uint16_t numPartitionPages) { | 406 uint16_t num_partition_pages) { |
| 402 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % | 407 DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) % |
| 403 kPartitionPageSize)); | 408 kPartitionPageSize)); |
| 404 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % | 409 DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) % |
| 405 kPartitionPageSize)); | 410 kPartitionPageSize)); |
| 406 DCHECK(numPartitionPages <= kNumPartitionPagesPerSuperPage); | 411 DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage); |
| 407 size_t totalSize = kPartitionPageSize * numPartitionPages; | 412 size_t total_size = kPartitionPageSize * num_partition_pages; |
| 408 size_t numPartitionPagesLeft = | 413 size_t num_partition_pages_left = |
| 409 (root->nextPartitionPageEnd - root->nextPartitionPage) >> | 414 (root->next_partition_page_end - root->next_partition_page) >> |
| 410 kPartitionPageShift; | 415 kPartitionPageShift; |
| 411 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | 416 if (LIKELY(num_partition_pages_left >= num_partition_pages)) { |
| 412 // In this case, we can still hand out pages from the current super page | 417 // In this case, we can still hand out pages from the current super page |
| 413 // allocation. | 418 // allocation. |
| 414 char* ret = root->nextPartitionPage; | 419 char* ret = root->next_partition_page; |
| 415 root->nextPartitionPage += totalSize; | 420 root->next_partition_page += total_size; |
| 416 partitionIncreaseCommittedPages(root, totalSize); | 421 partitionIncreaseCommittedPages(root, total_size); |
| 417 return ret; | 422 return ret; |
| 418 } | 423 } |
| 419 | 424 |
| 420 // Need a new super page. We want to allocate super pages in a continguous | 425 // Need a new super page. We want to allocate super pages in a continguous |
| 421 // address region as much as possible. This is important for not causing | 426 // address region as much as possible. This is important for not causing |
| 422 // page table bloat and not fragmenting address spaces in 32 bit | 427 // page table bloat and not fragmenting address spaces in 32 bit |
| 423 // architectures. | 428 // architectures. |
| 424 char* requestedAddress = root->nextSuperPage; | 429 char* requestedAddress = root->next_super_page; |
| 425 char* superPage = reinterpret_cast<char*>(allocPages( | 430 char* super_page = reinterpret_cast<char*>(AllocPages( |
| 426 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); | 431 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); |
| 427 if (UNLIKELY(!superPage)) | 432 if (UNLIKELY(!super_page)) |
| 428 return 0; | 433 return 0; |
| 429 | 434 |
| 430 root->totalSizeOfSuperPages += kSuperPageSize; | 435 root->total_size_of_super_pages += kSuperPageSize; |
| 431 partitionIncreaseCommittedPages(root, totalSize); | 436 partitionIncreaseCommittedPages(root, total_size); |
| 432 | 437 |
| 433 root->nextSuperPage = superPage + kSuperPageSize; | 438 root->next_super_page = super_page + kSuperPageSize; |
| 434 char* ret = superPage + kPartitionPageSize; | 439 char* ret = super_page + kPartitionPageSize; |
| 435 root->nextPartitionPage = ret + totalSize; | 440 root->next_partition_page = ret + total_size; |
| 436 root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; | 441 root->next_partition_page_end = root->next_super_page - kPartitionPageSize; |
| 437 // Make the first partition page in the super page a guard page, but leave a | 442 // Make the first partition page in the super page a guard page, but leave a |
| 438 // hole in the middle. | 443 // hole in the middle. |
| 439 // This is where we put page metadata and also a tiny amount of extent | 444 // This is where we put page metadata and also a tiny amount of extent |
| 440 // metadata. | 445 // metadata. |
| 441 setSystemPagesInaccessible(superPage, kSystemPageSize); | 446 SetSystemPagesInaccessible(super_page, kSystemPageSize); |
| 442 setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), | 447 SetSystemPagesInaccessible(super_page + (kSystemPageSize * 2), |
| 443 kPartitionPageSize - (kSystemPageSize * 2)); | 448 kPartitionPageSize - (kSystemPageSize * 2)); |
| 444 // Also make the last partition page a guard page. | 449 // Also make the last partition page a guard page. |
| 445 setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), | 450 SetSystemPagesInaccessible(super_page + (kSuperPageSize - kPartitionPageSize), |
| 446 kPartitionPageSize); | 451 kPartitionPageSize); |
| 447 | 452 |
| 448 // If we were after a specific address, but didn't get it, assume that | 453 // If we were after a specific address, but didn't get it, assume that |
| 449 // the system chose a lousy address. Here most OS'es have a default | 454 // the system chose a lousy address. Here most OS'es have a default |
| 450 // algorithm that isn't randomized. For example, most Linux | 455 // algorithm that isn't randomized. For example, most Linux |
| 451 // distributions will allocate the mapping directly before the last | 456 // distributions will allocate the mapping directly before the last |
| 452 // successful mapping, which is far from random. So we just get fresh | 457 // successful mapping, which is far from random. So we just get fresh |
| 453 // randomness for the next mapping attempt. | 458 // randomness for the next mapping attempt. |
| 454 if (requestedAddress && requestedAddress != superPage) | 459 if (requestedAddress && requestedAddress != super_page) |
| 455 root->nextSuperPage = 0; | 460 root->next_super_page = 0; |
| 456 | 461 |
| 457 // We allocated a new super page so update super page metadata. | 462 // We allocated a new super page so update super page metadata. |
| 458 // First check if this is a new extent or not. | 463 // First check if this is a new extent or not. |
| 459 PartitionSuperPageExtentEntry* latestExtent = | 464 PartitionSuperPageExtentEntry* latest_extent = |
| 460 reinterpret_cast<PartitionSuperPageExtentEntry*>( | 465 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 461 partitionSuperPageToMetadataArea(superPage)); | 466 PartitionSuperPageToMetadataArea(super_page)); |
| 462 // By storing the root in every extent metadata object, we have a fast way | 467 // By storing the root in every extent metadata object, we have a fast way |
| 463 // to go from a pointer within the partition to the root object. | 468 // to go from a pointer within the partition to the root object. |
| 464 latestExtent->root = root; | 469 latest_extent->root = root; |
| 465 // Most new extents will be part of a larger extent, and these three fields | 470 // Most new extents will be part of a larger extent, and these three fields |
| 466 // are unused, but we initialize them to 0 so that we get a clear signal | 471 // are unused, but we initialize them to 0 so that we get a clear signal |
| 467 // in case they are accidentally used. | 472 // in case they are accidentally used. |
| 468 latestExtent->superPageBase = 0; | 473 latest_extent->super_page_base = 0; |
| 469 latestExtent->superPagesEnd = 0; | 474 latest_extent->super_pages_end = 0; |
| 470 latestExtent->next = 0; | 475 latest_extent->next = 0; |
| 471 | 476 |
| 472 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; | 477 PartitionSuperPageExtentEntry* current_extent = root->current_extent; |
| 473 bool isNewExtent = (superPage != requestedAddress); | 478 bool isNewExtent = (super_page != requestedAddress); |
| 474 if (UNLIKELY(isNewExtent)) { | 479 if (UNLIKELY(isNewExtent)) { |
| 475 if (UNLIKELY(!currentExtent)) { | 480 if (UNLIKELY(!current_extent)) { |
| 476 DCHECK(!root->firstExtent); | 481 DCHECK(!root->first_extent); |
| 477 root->firstExtent = latestExtent; | 482 root->first_extent = latest_extent; |
| 478 } else { | 483 } else { |
| 479 DCHECK(currentExtent->superPageBase); | 484 DCHECK(current_extent->super_page_base); |
| 480 currentExtent->next = latestExtent; | 485 current_extent->next = latest_extent; |
| 481 } | 486 } |
| 482 root->currentExtent = latestExtent; | 487 root->current_extent = latest_extent; |
| 483 latestExtent->superPageBase = superPage; | 488 latest_extent->super_page_base = super_page; |
| 484 latestExtent->superPagesEnd = superPage + kSuperPageSize; | 489 latest_extent->super_pages_end = super_page + kSuperPageSize; |
| 485 } else { | 490 } else { |
| 486 // We allocated next to an existing extent so just nudge the size up a | 491 // We allocated next to an existing extent so just nudge the size up a |
| 487 // little. | 492 // little. |
| 488 DCHECK(currentExtent->superPagesEnd); | 493 DCHECK(current_extent->super_pages_end); |
| 489 currentExtent->superPagesEnd += kSuperPageSize; | 494 current_extent->super_pages_end += kSuperPageSize; |
| 490 DCHECK(ret >= currentExtent->superPageBase && | 495 DCHECK(ret >= current_extent->super_page_base && |
| 491 ret < currentExtent->superPagesEnd); | 496 ret < current_extent->super_pages_end); |
| 492 } | 497 } |
| 493 return ret; | 498 return ret; |
| 494 } | 499 } |
| 495 | 500 |
| 496 static ALWAYS_INLINE uint16_t | 501 static ALWAYS_INLINE uint16_t |
| 497 partitionBucketPartitionPages(const PartitionBucket* bucket) { | 502 partitionBucketPartitionPages(const PartitionBucket* bucket) { |
| 498 return (bucket->numSystemPagesPerSlotSpan + | 503 return (bucket->num_system_pages_per_slot_span + |
| 499 (kNumSystemPagesPerPartitionPage - 1)) / | 504 (kNumSystemPagesPerPartitionPage - 1)) / |
| 500 kNumSystemPagesPerPartitionPage; | 505 kNumSystemPagesPerPartitionPage; |
| 501 } | 506 } |
| 502 | 507 |
| 503 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { | 508 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { |
| 504 DCHECK(partitionPageStateIsDecommitted(page)); | 509 DCHECK(PartitionPageStateIsDecommitted(page)); |
| 505 | 510 |
| 506 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); | 511 page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket); |
| 507 DCHECK(page->numUnprovisionedSlots); | 512 DCHECK(page->num_unprovisioned_slots); |
| 508 | 513 |
| 509 page->nextPage = nullptr; | 514 page->next_page = nullptr; |
| 510 } | 515 } |
| 511 | 516 |
| 512 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, | 517 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, |
| 513 PartitionBucket* bucket) { | 518 PartitionBucket* bucket) { |
| 514 // The bucket never changes. We set it up once. | 519 // The bucket never changes. We set it up once. |
| 515 page->bucket = bucket; | 520 page->bucket = bucket; |
| 516 page->emptyCacheIndex = -1; | 521 page->empty_cache_index = -1; |
| 517 | 522 |
| 518 partitionPageReset(page); | 523 partitionPageReset(page); |
| 519 | 524 |
| 520 // If this page has just a single slot, do not set up page offsets for any | 525 // If this page has just a single slot, do not set up page offsets for any |
| 521 // page metadata other than the first one. This ensures that attempts to | 526 // page metadata other than the first one. This ensures that attempts to |
| 522 // touch invalid page metadata fail. | 527 // touch invalid page metadata fail. |
| 523 if (page->numUnprovisionedSlots == 1) | 528 if (page->num_unprovisioned_slots == 1) |
| 524 return; | 529 return; |
| 525 | 530 |
| 526 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 531 uint16_t num_partition_pages = partitionBucketPartitionPages(bucket); |
| 527 char* pageCharPtr = reinterpret_cast<char*>(page); | 532 char* pageCharPtr = reinterpret_cast<char*>(page); |
| 528 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 533 for (uint16_t i = 1; i < num_partition_pages; ++i) { |
| 529 pageCharPtr += kPageMetadataSize; | 534 pageCharPtr += kPageMetadataSize; |
| 530 PartitionPage* secondaryPage = | 535 PartitionPage* secondaryPage = |
| 531 reinterpret_cast<PartitionPage*>(pageCharPtr); | 536 reinterpret_cast<PartitionPage*>(pageCharPtr); |
| 532 secondaryPage->pageOffset = i; | 537 secondaryPage->page_offset = i; |
| 533 } | 538 } |
| 534 } | 539 } |
| 535 | 540 |
| 536 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( | 541 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( |
| 537 PartitionPage* page) { | 542 PartitionPage* page) { |
| 538 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 543 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 539 uint16_t numSlots = page->numUnprovisionedSlots; | 544 uint16_t num_slots = page->num_unprovisioned_slots; |
| 540 DCHECK(numSlots); | 545 DCHECK(num_slots); |
| 541 PartitionBucket* bucket = page->bucket; | 546 PartitionBucket* bucket = page->bucket; |
| 542 // We should only get here when _every_ slot is either used or unprovisioned. | 547 // We should only get here when _every_ slot is either used or unprovisioned. |
| 543 // (The third state is "on the freelist". If we have a non-empty freelist, we | 548 // (The third state is "on the freelist". If we have a non-empty freelist, we |
| 544 // should not get here.) | 549 // should not get here.) |
| 545 DCHECK(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 550 DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket)); |
| 546 // Similarly, make explicitly sure that the freelist is empty. | 551 // Similarly, make explicitly sure that the freelist is empty. |
| 547 DCHECK(!page->freelistHead); | 552 DCHECK(!page->freelist_head); |
| 548 DCHECK(page->numAllocatedSlots >= 0); | 553 DCHECK(page->num_allocated_slots >= 0); |
| 549 | 554 |
| 550 size_t size = bucket->slotSize; | 555 size_t size = bucket->slot_size; |
| 551 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 556 char* base = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 552 char* returnObject = base + (size * page->numAllocatedSlots); | 557 char* return_object = base + (size * page->num_allocated_slots); |
| 553 char* firstFreelistPointer = returnObject + size; | 558 char* firstFreelistPointer = return_object + size; |
| 554 char* firstFreelistPointerExtent = | 559 char* firstFreelistPointerExtent = |
| 555 firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 560 firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
| 556 // Our goal is to fault as few system pages as possible. We calculate the | 561 // Our goal is to fault as few system pages as possible. We calculate the |
| 557 // page containing the "end" of the returned slot, and then allow freelist | 562 // page containing the "end" of the returned slot, and then allow freelist |
| 558 // pointers to be written up to the end of that page. | 563 // pointers to be written up to the end of that page. |
| 559 char* subPageLimit = reinterpret_cast<char*>( | 564 char* sub_page_limit = reinterpret_cast<char*>( |
| 560 roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); | 565 RoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); |
| 561 char* slotsLimit = returnObject + (size * numSlots); | 566 char* slots_limit = return_object + (size * num_slots); |
| 562 char* freelistLimit = subPageLimit; | 567 char* freelist_limit = sub_page_limit; |
| 563 if (UNLIKELY(slotsLimit < freelistLimit)) | 568 if (UNLIKELY(slots_limit < freelist_limit)) |
| 564 freelistLimit = slotsLimit; | 569 freelist_limit = slots_limit; |
| 565 | 570 |
| 566 uint16_t numNewFreelistEntries = 0; | 571 uint16_t num_new_freelist_entries = 0; |
| 567 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 572 if (LIKELY(firstFreelistPointerExtent <= freelist_limit)) { |
| 568 // Only consider used space in the slot span. If we consider wasted | 573 // Only consider used space in the slot span. If we consider wasted |
| 569 // space, we may get an off-by-one when a freelist pointer fits in the | 574 // space, we may get an off-by-one when a freelist pointer fits in the |
| 570 // wasted space, but a slot does not. | 575 // wasted space, but a slot does not. |
| 571 // We know we can fit at least one freelist pointer. | 576 // We know we can fit at least one freelist pointer. |
| 572 numNewFreelistEntries = 1; | 577 num_new_freelist_entries = 1; |
| 573 // Any further entries require space for the whole slot span. | 578 // Any further entries require space for the whole slot span. |
| 574 numNewFreelistEntries += static_cast<uint16_t>( | 579 num_new_freelist_entries += static_cast<uint16_t>( |
| 575 (freelistLimit - firstFreelistPointerExtent) / size); | 580 (freelist_limit - firstFreelistPointerExtent) / size); |
| 576 } | 581 } |
| 577 | 582 |
| 578 // We always return an object slot -- that's the +1 below. | 583 // We always return an object slot -- that's the +1 below. |
| 579 // We do not neccessarily create any new freelist entries, because we cross | 584 // We do not neccessarily create any new freelist entries, because we cross |
| 580 // sub page boundaries frequently for large bucket sizes. | 585 // sub page boundaries frequently for large bucket sizes. |
| 581 DCHECK(numNewFreelistEntries + 1 <= numSlots); | 586 DCHECK(num_new_freelist_entries + 1 <= num_slots); |
| 582 numSlots -= (numNewFreelistEntries + 1); | 587 num_slots -= (num_new_freelist_entries + 1); |
| 583 page->numUnprovisionedSlots = numSlots; | 588 page->num_unprovisioned_slots = num_slots; |
| 584 page->numAllocatedSlots++; | 589 page->num_allocated_slots++; |
| 585 | 590 |
| 586 if (LIKELY(numNewFreelistEntries)) { | 591 if (LIKELY(num_new_freelist_entries)) { |
| 587 char* freelistPointer = firstFreelistPointer; | 592 char* freelist_pointer = firstFreelistPointer; |
| 588 PartitionFreelistEntry* entry = | 593 PartitionFreelistEntry* entry = |
| 589 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 594 reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); |
| 590 page->freelistHead = entry; | 595 page->freelist_head = entry; |
| 591 while (--numNewFreelistEntries) { | 596 while (--num_new_freelist_entries) { |
| 592 freelistPointer += size; | 597 freelist_pointer += size; |
| 593 PartitionFreelistEntry* nextEntry = | 598 PartitionFreelistEntry* nextEntry = |
| 594 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 599 reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); |
| 595 entry->next = partitionFreelistMask(nextEntry); | 600 entry->next = PartitionFreelistMask(nextEntry); |
| 596 entry = nextEntry; | 601 entry = nextEntry; |
| 597 } | 602 } |
| 598 entry->next = partitionFreelistMask(0); | 603 entry->next = PartitionFreelistMask(0); |
| 599 } else { | 604 } else { |
| 600 page->freelistHead = 0; | 605 page->freelist_head = 0; |
| 601 } | 606 } |
| 602 return returnObject; | 607 return return_object; |
| 603 } | 608 } |
| 604 | 609 |
| 605 // This helper function scans a bucket's active page list for a suitable new | 610 // This helper function scans a bucket's active page list for a suitable new |
| 606 // active page. | 611 // active page. |
| 607 // When it finds a suitable new active page (one that has free slots and is not | 612 // When it finds a suitable new active page (one that has free slots and is not |
| 608 // empty), it is set as the new active page. If there is no suitable new | 613 // empty), it is set as the new active page. If there is no suitable new |
| 609 // active page, the current active page is set to the seed page. | 614 // active page, the current active page is set to the seed page. |
| 610 // As potential pages are scanned, they are tidied up according to their state. | 615 // As potential pages are scanned, they are tidied up according to their state. |
| 611 // Empty pages are swept on to the empty page list, decommitted pages on to the | 616 // Empty pages are swept on to the empty page list, decommitted pages on to the |
| 612 // decommitted page list and full pages are unlinked from any list. | 617 // decommitted page list and full pages are unlinked from any list. |
| 613 static bool partitionSetNewActivePage(PartitionBucket* bucket) { | 618 static bool partitionSetNewActivePage(PartitionBucket* bucket) { |
| 614 PartitionPage* page = bucket->activePagesHead; | 619 PartitionPage* page = bucket->active_pages_head; |
| 615 if (page == &PartitionRootBase::gSeedPage) | 620 if (page == &PartitionRootBase::gSeedPage) |
| 616 return false; | 621 return false; |
| 617 | 622 |
| 618 PartitionPage* nextPage; | 623 PartitionPage* next_page; |
| 619 | 624 |
| 620 for (; page; page = nextPage) { | 625 for (; page; page = next_page) { |
| 621 nextPage = page->nextPage; | 626 next_page = page->next_page; |
| 622 DCHECK(page->bucket == bucket); | 627 DCHECK(page->bucket == bucket); |
| 623 DCHECK(page != bucket->emptyPagesHead); | 628 DCHECK(page != bucket->empty_pages_head); |
| 624 DCHECK(page != bucket->decommittedPagesHead); | 629 DCHECK(page != bucket->decommitted_pages_head); |
| 625 | 630 |
| 626 // Deal with empty and decommitted pages. | 631 // Deal with empty and decommitted pages. |
| 627 if (LIKELY(partitionPageStateIsActive(page))) { | 632 if (LIKELY(PartitionPageStateIsActive(page))) { |
| 628 // This page is usable because it has freelist entries, or has | 633 // This page is usable because it has freelist entries, or has |
| 629 // unprovisioned slots we can create freelist entries from. | 634 // unprovisioned slots we can create freelist entries from. |
| 630 bucket->activePagesHead = page; | 635 bucket->active_pages_head = page; |
| 631 return true; | 636 return true; |
| 632 } | 637 } |
| 633 if (LIKELY(partitionPageStateIsEmpty(page))) { | 638 if (LIKELY(PartitionPageStateIsEmpty(page))) { |
| 634 page->nextPage = bucket->emptyPagesHead; | 639 page->next_page = bucket->empty_pages_head; |
| 635 bucket->emptyPagesHead = page; | 640 bucket->empty_pages_head = page; |
| 636 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { | 641 } else if (LIKELY(PartitionPageStateIsDecommitted(page))) { |
| 637 page->nextPage = bucket->decommittedPagesHead; | 642 page->next_page = bucket->decommitted_pages_head; |
| 638 bucket->decommittedPagesHead = page; | 643 bucket->decommitted_pages_head = page; |
| 639 } else { | 644 } else { |
| 640 DCHECK(partitionPageStateIsFull(page)); | 645 DCHECK(PartitionPageStateIsFull(page)); |
| 641 // If we get here, we found a full page. Skip over it too, and also | 646 // If we get here, we found a full page. Skip over it too, and also |
| 642 // tag it as full (via a negative value). We need it tagged so that | 647 // tag it as full (via a negative value). We need it tagged so that |
| 643 // free'ing can tell, and move it back into the active page list. | 648 // free'ing can tell, and move it back into the active page list. |
| 644 page->numAllocatedSlots = -page->numAllocatedSlots; | 649 page->num_allocated_slots = -page->num_allocated_slots; |
| 645 ++bucket->numFullPages; | 650 ++bucket->num_full_pages; |
| 646 // numFullPages is a uint16_t for efficient packing so guard against | 651 // num_full_pages is a uint16_t for efficient packing so guard against |
| 647 // overflow to be safe. | 652 // overflow to be safe. |
| 648 if (UNLIKELY(!bucket->numFullPages)) | 653 if (UNLIKELY(!bucket->num_full_pages)) |
| 649 partitionBucketFull(); | 654 partitionBucketFull(); |
| 650 // Not necessary but might help stop accidents. | 655 // Not necessary but might help stop accidents. |
| 651 page->nextPage = 0; | 656 page->next_page = 0; |
| 652 } | 657 } |
| 653 } | 658 } |
| 654 | 659 |
| 655 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 660 bucket->active_pages_head = &PartitionRootGeneric::gSeedPage; |
| 656 return false; | 661 return false; |
| 657 } | 662 } |
| 658 | 663 |
| 659 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( | 664 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( |
| 660 PartitionPage* page) { | 665 PartitionPage* page) { |
| 661 DCHECK(partitionBucketIsDirectMapped(page->bucket)); | 666 DCHECK(PartitionBucketIsDirectMapped(page->bucket)); |
| 662 return reinterpret_cast<PartitionDirectMapExtent*>( | 667 return reinterpret_cast<PartitionDirectMapExtent*>( |
| 663 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); | 668 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); |
| 664 } | 669 } |
| 665 | 670 |
| 666 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, | 671 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, |
| 667 size_t size) { | 672 size_t size) { |
| 668 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 673 size_t* raw_sizePtr = PartitionPageGetRawSizePtr(page); |
| 669 if (UNLIKELY(rawSizePtr != nullptr)) | 674 if (UNLIKELY(raw_sizePtr != nullptr)) |
| 670 *rawSizePtr = size; | 675 *raw_sizePtr = size; |
| 671 } | 676 } |
| 672 | 677 |
| 673 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, | 678 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, |
| 674 int flags, | 679 int flags, |
| 675 size_t rawSize) { | 680 size_t raw_size) { |
| 676 size_t size = partitionDirectMapSize(rawSize); | 681 size_t size = PartitionDirectMapSize(raw_size); |
| 677 | 682 |
| 678 // Because we need to fake looking like a super page, we need to allocate | 683 // Because we need to fake looking like a super page, we need to allocate |
| 679 // a bunch of system pages more than "size": | 684 // a bunch of system pages more than "size": |
| 680 // - The first few system pages are the partition page in which the super | 685 // - The first few system pages are the partition page in which the super |
| 681 // page metadata is stored. We fault just one system page out of a partition | 686 // page metadata is stored. We fault just one system page out of a partition |
| 682 // page sized clump. | 687 // page sized clump. |
| 683 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the | 688 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the |
| 684 // massive address space plus randomization instead). | 689 // massive address space plus randomization instead). |
| 685 size_t mapSize = size + kPartitionPageSize; | 690 size_t map_size = size + kPartitionPageSize; |
| 686 #if !defined(ARCH_CPU_64_BITS) | 691 #if !defined(ARCH_CPU_64_BITS) |
| 687 mapSize += kSystemPageSize; | 692 map_size += kSystemPageSize; |
| 688 #endif | 693 #endif |
| 689 // Round up to the allocation granularity. | 694 // Round up to the allocation granularity. |
| 690 mapSize += kPageAllocationGranularityOffsetMask; | 695 map_size += kPageAllocationGranularityOffsetMask; |
| 691 mapSize &= kPageAllocationGranularityBaseMask; | 696 map_size &= kPageAllocationGranularityBaseMask; |
| 692 | 697 |
| 693 // TODO: these pages will be zero-filled. Consider internalizing an | 698 // TODO: these pages will be zero-filled. Consider internalizing an |
| 694 // allocZeroed() API so we can avoid a memset() entirely in this case. | 699 // allocZeroed() API so we can avoid a memset() entirely in this case. |
| 695 char* ptr = reinterpret_cast<char*>( | 700 char* ptr = reinterpret_cast<char*>( |
| 696 allocPages(0, mapSize, kSuperPageSize, PageAccessible)); | 701 AllocPages(0, map_size, kSuperPageSize, PageAccessible)); |
| 697 if (UNLIKELY(!ptr)) | 702 if (UNLIKELY(!ptr)) |
| 698 return nullptr; | 703 return nullptr; |
| 699 | 704 |
| 700 size_t committedPageSize = size + kSystemPageSize; | 705 size_t committedPageSize = size + kSystemPageSize; |
| 701 root->totalSizeOfDirectMappedPages += committedPageSize; | 706 root->total_size_of_direct_mapped_pages += committedPageSize; |
| 702 partitionIncreaseCommittedPages(root, committedPageSize); | 707 partitionIncreaseCommittedPages(root, committedPageSize); |
| 703 | 708 |
| 704 char* slot = ptr + kPartitionPageSize; | 709 char* slot = ptr + kPartitionPageSize; |
| 705 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), | 710 SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2), |
| 706 kPartitionPageSize - (kSystemPageSize * 2)); | 711 kPartitionPageSize - (kSystemPageSize * 2)); |
| 707 #if !defined(ARCH_CPU_64_BITS) | 712 #if !defined(ARCH_CPU_64_BITS) |
| 708 setSystemPagesInaccessible(ptr, kSystemPageSize); | 713 SetSystemPagesInaccessible(ptr, kSystemPageSize); |
| 709 setSystemPagesInaccessible(slot + size, kSystemPageSize); | 714 SetSystemPagesInaccessible(slot + size, kSystemPageSize); |
| 710 #endif | 715 #endif |
| 711 | 716 |
| 712 PartitionSuperPageExtentEntry* extent = | 717 PartitionSuperPageExtentEntry* extent = |
| 713 reinterpret_cast<PartitionSuperPageExtentEntry*>( | 718 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 714 partitionSuperPageToMetadataArea(ptr)); | 719 PartitionSuperPageToMetadataArea(ptr)); |
| 715 extent->root = root; | 720 extent->root = root; |
| 716 // The new structures are all located inside a fresh system page so they | 721 // The new structures are all located inside a fresh system page so they |
| 717 // will all be zeroed out. These DCHECKs are for documentation. | 722 // will all be zeroed out. These DCHECKs are for documentation. |
| 718 DCHECK(!extent->superPageBase); | 723 DCHECK(!extent->super_page_base); |
| 719 DCHECK(!extent->superPagesEnd); | 724 DCHECK(!extent->super_pages_end); |
| 720 DCHECK(!extent->next); | 725 DCHECK(!extent->next); |
| 721 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); | 726 PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot); |
| 722 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( | 727 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( |
| 723 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); | 728 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); |
| 724 DCHECK(!page->nextPage); | 729 DCHECK(!page->next_page); |
| 725 DCHECK(!page->numAllocatedSlots); | 730 DCHECK(!page->num_allocated_slots); |
| 726 DCHECK(!page->numUnprovisionedSlots); | 731 DCHECK(!page->num_unprovisioned_slots); |
| 727 DCHECK(!page->pageOffset); | 732 DCHECK(!page->page_offset); |
| 728 DCHECK(!page->emptyCacheIndex); | 733 DCHECK(!page->empty_cache_index); |
| 729 page->bucket = bucket; | 734 page->bucket = bucket; |
| 730 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); | 735 page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 731 PartitionFreelistEntry* nextEntry = | 736 PartitionFreelistEntry* nextEntry = |
| 732 reinterpret_cast<PartitionFreelistEntry*>(slot); | 737 reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 733 nextEntry->next = partitionFreelistMask(0); | 738 nextEntry->next = PartitionFreelistMask(0); |
| 734 | 739 |
| 735 DCHECK(!bucket->activePagesHead); | 740 DCHECK(!bucket->active_pages_head); |
| 736 DCHECK(!bucket->emptyPagesHead); | 741 DCHECK(!bucket->empty_pages_head); |
| 737 DCHECK(!bucket->decommittedPagesHead); | 742 DCHECK(!bucket->decommitted_pages_head); |
| 738 DCHECK(!bucket->numSystemPagesPerSlotSpan); | 743 DCHECK(!bucket->num_system_pages_per_slot_span); |
| 739 DCHECK(!bucket->numFullPages); | 744 DCHECK(!bucket->num_full_pages); |
| 740 bucket->slotSize = size; | 745 bucket->slot_size = size; |
| 741 | 746 |
| 742 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); | 747 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); |
| 743 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; | 748 mapExtent->map_size = map_size - kPartitionPageSize - kSystemPageSize; |
| 744 mapExtent->bucket = bucket; | 749 mapExtent->bucket = bucket; |
| 745 | 750 |
| 746 // Maintain the doubly-linked list of all direct mappings. | 751 // Maintain the doubly-linked list of all direct mappings. |
| 747 mapExtent->nextExtent = root->directMapList; | 752 mapExtent->next_extent = root->direct_map_list; |
| 748 if (mapExtent->nextExtent) | 753 if (mapExtent->next_extent) |
| 749 mapExtent->nextExtent->prevExtent = mapExtent; | 754 mapExtent->next_extent->prev_extent = mapExtent; |
| 750 mapExtent->prevExtent = nullptr; | 755 mapExtent->prev_extent = nullptr; |
| 751 root->directMapList = mapExtent; | 756 root->direct_map_list = mapExtent; |
| 752 | 757 |
| 753 return page; | 758 return page; |
| 754 } | 759 } |
| 755 | 760 |
| 756 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { | 761 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { |
| 757 PartitionRootBase* root = partitionPageToRoot(page); | 762 PartitionRootBase* root = PartitionPageToRoot(page); |
| 758 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); | 763 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); |
| 759 size_t unmapSize = extent->mapSize; | 764 size_t unmap_size = extent->map_size; |
| 760 | 765 |
| 761 // Maintain the doubly-linked list of all direct mappings. | 766 // Maintain the doubly-linked list of all direct mappings. |
| 762 if (extent->prevExtent) { | 767 if (extent->prev_extent) { |
| 763 DCHECK(extent->prevExtent->nextExtent == extent); | 768 DCHECK(extent->prev_extent->next_extent == extent); |
| 764 extent->prevExtent->nextExtent = extent->nextExtent; | 769 extent->prev_extent->next_extent = extent->next_extent; |
| 765 } else { | 770 } else { |
| 766 root->directMapList = extent->nextExtent; | 771 root->direct_map_list = extent->next_extent; |
| 767 } | 772 } |
| 768 if (extent->nextExtent) { | 773 if (extent->next_extent) { |
| 769 DCHECK(extent->nextExtent->prevExtent == extent); | 774 DCHECK(extent->next_extent->prev_extent == extent); |
| 770 extent->nextExtent->prevExtent = extent->prevExtent; | 775 extent->next_extent->prev_extent = extent->prev_extent; |
| 771 } | 776 } |
| 772 | 777 |
| 773 // Add on the size of the trailing guard page and preceeding partition | 778 // Add on the size of the trailing guard page and preceeding partition |
| 774 // page. | 779 // page. |
| 775 unmapSize += kPartitionPageSize + kSystemPageSize; | 780 unmap_size += kPartitionPageSize + kSystemPageSize; |
| 776 | 781 |
| 777 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; | 782 size_t uncommittedPageSize = page->bucket->slot_size + kSystemPageSize; |
| 778 partitionDecreaseCommittedPages(root, uncommittedPageSize); | 783 partitionDecreaseCommittedPages(root, uncommittedPageSize); |
| 779 DCHECK(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); | 784 DCHECK(root->total_size_of_direct_mapped_pages >= uncommittedPageSize); |
| 780 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; | 785 root->total_size_of_direct_mapped_pages -= uncommittedPageSize; |
| 781 | 786 |
| 782 DCHECK(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 787 DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask)); |
| 783 | 788 |
| 784 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 789 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 785 // Account for the mapping starting a partition page before the actual | 790 // Account for the mapping starting a partition page before the actual |
| 786 // allocation address. | 791 // allocation address. |
| 787 ptr -= kPartitionPageSize; | 792 ptr -= kPartitionPageSize; |
| 788 | 793 |
| 789 freePages(ptr, unmapSize); | 794 FreePages(ptr, unmap_size); |
| 790 } | 795 } |
| 791 | 796 |
| 792 void* partitionAllocSlowPath(PartitionRootBase* root, | 797 void* PartitionAllocSlowPath(PartitionRootBase* root, |
| 793 int flags, | 798 int flags, |
| 794 size_t size, | 799 size_t size, |
| 795 PartitionBucket* bucket) { | 800 PartitionBucket* bucket) { |
| 796 // The slow path is called when the freelist is empty. | 801 // The slow path is called when the freelist is empty. |
| 797 DCHECK(!bucket->activePagesHead->freelistHead); | 802 DCHECK(!bucket->active_pages_head->freelist_head); |
| 798 | 803 |
| 799 PartitionPage* newPage = nullptr; | 804 PartitionPage* newPage = nullptr; |
| 800 | 805 |
| 801 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 806 // For the PartitionAllocGeneric API, we have a bunch of buckets marked |
| 802 // as special cases. We bounce them through to the slow path so that we | 807 // as special cases. We bounce them through to the slow path so that we |
| 803 // can still have a blazing fast hot path due to lack of corner-case | 808 // can still have a blazing fast hot path due to lack of corner-case |
| 804 // branches. | 809 // branches. |
| 805 bool returnNull = flags & PartitionAllocReturnNull; | 810 bool returnNull = flags & PartitionAllocReturnNull; |
| 806 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 811 if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) { |
| 807 DCHECK(size > kGenericMaxBucketed); | 812 DCHECK(size > kGenericMaxBucketed); |
| 808 DCHECK(bucket == &PartitionRootBase::gPagedBucket); | 813 DCHECK(bucket == &PartitionRootBase::gPagedBucket); |
| 809 DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | 814 DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage); |
| 810 if (size > kGenericMaxDirectMapped) { | 815 if (size > kGenericMaxDirectMapped) { |
| 811 if (returnNull) | 816 if (returnNull) |
| 812 return nullptr; | 817 return nullptr; |
| 813 partitionExcessiveAllocationSize(); | 818 partitionExcessiveAllocationSize(); |
| 814 } | 819 } |
| 815 newPage = partitionDirectMap(root, flags, size); | 820 newPage = partitionDirectMap(root, flags, size); |
| 816 } else if (LIKELY(partitionSetNewActivePage(bucket))) { | 821 } else if (LIKELY(partitionSetNewActivePage(bucket))) { |
| 817 // First, did we find an active page in the active pages list? | 822 // First, did we find an active page in the active pages list? |
| 818 newPage = bucket->activePagesHead; | 823 newPage = bucket->active_pages_head; |
| 819 DCHECK(partitionPageStateIsActive(newPage)); | 824 DCHECK(PartitionPageStateIsActive(newPage)); |
| 820 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || | 825 } else if (LIKELY(bucket->empty_pages_head != nullptr) || |
| 821 LIKELY(bucket->decommittedPagesHead != nullptr)) { | 826 LIKELY(bucket->decommitted_pages_head != nullptr)) { |
| 822 // Second, look in our lists of empty and decommitted pages. | 827 // Second, look in our lists of empty and decommitted pages. |
| 823 // Check empty pages first, which are preferred, but beware that an | 828 // Check empty pages first, which are preferred, but beware that an |
| 824 // empty page might have been decommitted. | 829 // empty page might have been decommitted. |
| 825 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { | 830 while (LIKELY((newPage = bucket->empty_pages_head) != nullptr)) { |
| 826 DCHECK(newPage->bucket == bucket); | 831 DCHECK(newPage->bucket == bucket); |
| 827 DCHECK(partitionPageStateIsEmpty(newPage) || | 832 DCHECK(PartitionPageStateIsEmpty(newPage) || |
| 828 partitionPageStateIsDecommitted(newPage)); | 833 PartitionPageStateIsDecommitted(newPage)); |
| 829 bucket->emptyPagesHead = newPage->nextPage; | 834 bucket->empty_pages_head = newPage->next_page; |
| 830 // Accept the empty page unless it got decommitted. | 835 // Accept the empty page unless it got decommitted. |
| 831 if (newPage->freelistHead) { | 836 if (newPage->freelist_head) { |
| 832 newPage->nextPage = nullptr; | 837 newPage->next_page = nullptr; |
| 833 break; | 838 break; |
| 834 } | 839 } |
| 835 DCHECK(partitionPageStateIsDecommitted(newPage)); | 840 DCHECK(PartitionPageStateIsDecommitted(newPage)); |
| 836 newPage->nextPage = bucket->decommittedPagesHead; | 841 newPage->next_page = bucket->decommitted_pages_head; |
| 837 bucket->decommittedPagesHead = newPage; | 842 bucket->decommitted_pages_head = newPage; |
| 838 } | 843 } |
| 839 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { | 844 if (UNLIKELY(!newPage) && |
| 840 newPage = bucket->decommittedPagesHead; | 845 LIKELY(bucket->decommitted_pages_head != nullptr)) { |
| 846 newPage = bucket->decommitted_pages_head; |
| 841 DCHECK(newPage->bucket == bucket); | 847 DCHECK(newPage->bucket == bucket); |
| 842 DCHECK(partitionPageStateIsDecommitted(newPage)); | 848 DCHECK(PartitionPageStateIsDecommitted(newPage)); |
| 843 bucket->decommittedPagesHead = newPage->nextPage; | 849 bucket->decommitted_pages_head = newPage->next_page; |
| 844 void* addr = partitionPageToPointer(newPage); | 850 void* addr = PartitionPageToPointer(newPage); |
| 845 partitionRecommitSystemPages(root, addr, | 851 partitionRecommitSystemPages(root, addr, |
| 846 partitionBucketBytes(newPage->bucket)); | 852 PartitionBucketBytes(newPage->bucket)); |
| 847 partitionPageReset(newPage); | 853 partitionPageReset(newPage); |
| 848 } | 854 } |
| 849 DCHECK(newPage); | 855 DCHECK(newPage); |
| 850 } else { | 856 } else { |
| 851 // Third. If we get here, we need a brand new page. | 857 // Third. If we get here, we need a brand new page. |
| 852 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 858 uint16_t num_partition_pages = partitionBucketPartitionPages(bucket); |
| 853 void* rawPages = | 859 void* rawPages = |
| 854 partitionAllocPartitionPages(root, flags, numPartitionPages); | 860 PartitionAllocPartitionPages(root, flags, num_partition_pages); |
| 855 if (LIKELY(rawPages != nullptr)) { | 861 if (LIKELY(rawPages != nullptr)) { |
| 856 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); | 862 newPage = PartitionPointerToPageNoAlignmentCheck(rawPages); |
| 857 partitionPageSetup(newPage, bucket); | 863 partitionPageSetup(newPage, bucket); |
| 858 } | 864 } |
| 859 } | 865 } |
| 860 | 866 |
| 861 // Bail if we had a memory allocation failure. | 867 // Bail if we had a memory allocation failure. |
| 862 if (UNLIKELY(!newPage)) { | 868 if (UNLIKELY(!newPage)) { |
| 863 DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | 869 DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage); |
| 864 if (returnNull) | 870 if (returnNull) |
| 865 return nullptr; | 871 return nullptr; |
| 866 partitionOutOfMemory(root); | 872 partitionOutOfMemory(root); |
| 867 } | 873 } |
| 868 | 874 |
| 869 bucket = newPage->bucket; | 875 bucket = newPage->bucket; |
| 870 DCHECK(bucket != &PartitionRootBase::gPagedBucket); | 876 DCHECK(bucket != &PartitionRootBase::gPagedBucket); |
| 871 bucket->activePagesHead = newPage; | 877 bucket->active_pages_head = newPage; |
| 872 partitionPageSetRawSize(newPage, size); | 878 partitionPageSetRawSize(newPage, size); |
| 873 | 879 |
| 874 // If we found an active page with free slots, or an empty page, we have a | 880 // If we found an active page with free slots, or an empty page, we have a |
| 875 // usable freelist head. | 881 // usable freelist head. |
| 876 if (LIKELY(newPage->freelistHead != nullptr)) { | 882 if (LIKELY(newPage->freelist_head != nullptr)) { |
| 877 PartitionFreelistEntry* entry = newPage->freelistHead; | 883 PartitionFreelistEntry* entry = newPage->freelist_head; |
| 878 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); | 884 PartitionFreelistEntry* newHead = PartitionFreelistMask(entry->next); |
| 879 newPage->freelistHead = newHead; | 885 newPage->freelist_head = newHead; |
| 880 newPage->numAllocatedSlots++; | 886 newPage->num_allocated_slots++; |
| 881 return entry; | 887 return entry; |
| 882 } | 888 } |
| 883 // Otherwise, we need to build the freelist. | 889 // Otherwise, we need to build the freelist. |
| 884 DCHECK(newPage->numUnprovisionedSlots); | 890 DCHECK(newPage->num_unprovisioned_slots); |
| 885 return partitionPageAllocAndFillFreelist(newPage); | 891 return partitionPageAllocAndFillFreelist(newPage); |
| 886 } | 892 } |
| 887 | 893 |
| 888 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, | 894 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, |
| 889 PartitionPage* page) { | 895 PartitionPage* page) { |
| 890 DCHECK(partitionPageStateIsEmpty(page)); | 896 DCHECK(PartitionPageStateIsEmpty(page)); |
| 891 DCHECK(!partitionBucketIsDirectMapped(page->bucket)); | 897 DCHECK(!PartitionBucketIsDirectMapped(page->bucket)); |
| 892 void* addr = partitionPageToPointer(page); | 898 void* addr = PartitionPageToPointer(page); |
| 893 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); | 899 partitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket)); |
| 894 | 900 |
| 895 // We actually leave the decommitted page in the active list. We'll sweep | 901 // We actually leave the decommitted page in the active list. We'll sweep |
| 896 // it on to the decommitted page list when we next walk the active page | 902 // it on to the decommitted page list when we next walk the active page |
| 897 // list. | 903 // list. |
| 898 // Pulling this trick enables us to use a singly-linked page list for all | 904 // Pulling this trick enables us to use a singly-linked page list for all |
| 899 // cases, which is critical in keeping the page metadata structure down to | 905 // cases, which is critical in keeping the page metadata structure down to |
| 900 // 32 bytes in size. | 906 // 32 bytes in size. |
| 901 page->freelistHead = 0; | 907 page->freelist_head = 0; |
| 902 page->numUnprovisionedSlots = 0; | 908 page->num_unprovisioned_slots = 0; |
| 903 DCHECK(partitionPageStateIsDecommitted(page)); | 909 DCHECK(PartitionPageStateIsDecommitted(page)); |
| 904 } | 910 } |
| 905 | 911 |
| 906 static void partitionDecommitPageIfPossible(PartitionRootBase* root, | 912 static void partitionDecommitPageIfPossible(PartitionRootBase* root, |
| 907 PartitionPage* page) { | 913 PartitionPage* page) { |
| 908 DCHECK(page->emptyCacheIndex >= 0); | 914 DCHECK(page->empty_cache_index >= 0); |
| 909 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 915 DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans); |
| 910 DCHECK(page == root->globalEmptyPageRing[page->emptyCacheIndex]); | 916 DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]); |
| 911 page->emptyCacheIndex = -1; | 917 page->empty_cache_index = -1; |
| 912 if (partitionPageStateIsEmpty(page)) | 918 if (PartitionPageStateIsEmpty(page)) |
| 913 partitionDecommitPage(root, page); | 919 partitionDecommitPage(root, page); |
| 914 } | 920 } |
| 915 | 921 |
| 916 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { | 922 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { |
| 917 DCHECK(partitionPageStateIsEmpty(page)); | 923 DCHECK(PartitionPageStateIsEmpty(page)); |
| 918 PartitionRootBase* root = partitionPageToRoot(page); | 924 PartitionRootBase* root = PartitionPageToRoot(page); |
| 919 | 925 |
| 920 // If the page is already registered as empty, give it another life. | 926 // If the page is already registered as empty, give it another life. |
| 921 if (page->emptyCacheIndex != -1) { | 927 if (page->empty_cache_index != -1) { |
| 922 DCHECK(page->emptyCacheIndex >= 0); | 928 DCHECK(page->empty_cache_index >= 0); |
| 923 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 929 DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans); |
| 924 DCHECK(root->globalEmptyPageRing[page->emptyCacheIndex] == page); | 930 DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page); |
| 925 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; | 931 root->global_empty_page_ring[page->empty_cache_index] = 0; |
| 926 } | 932 } |
| 927 | 933 |
| 928 int16_t currentIndex = root->globalEmptyPageRingIndex; | 934 int16_t currentIndex = root->global_empty_page_ring_index; |
| 929 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; | 935 PartitionPage* pageToDecommit = root->global_empty_page_ring[currentIndex]; |
| 930 // The page might well have been re-activated, filled up, etc. before we get | 936 // The page might well have been re-activated, filled up, etc. before we get |
| 931 // around to looking at it here. | 937 // around to looking at it here. |
| 932 if (pageToDecommit) | 938 if (pageToDecommit) |
| 933 partitionDecommitPageIfPossible(root, pageToDecommit); | 939 partitionDecommitPageIfPossible(root, pageToDecommit); |
| 934 | 940 |
| 935 // We put the empty slot span on our global list of "pages that were once | 941 // We put the empty slot span on our global list of "pages that were once |
| 936 // empty". thus providing it a bit of breathing room to get re-used before | 942 // empty". thus providing it a bit of breathing room to get re-used before |
| 937 // we really free it. This improves performance, particularly on Mac OS X | 943 // we really free it. This improves performance, particularly on Mac OS X |
| 938 // which has subpar memory management performance. | 944 // which has subpar memory management performance. |
| 939 root->globalEmptyPageRing[currentIndex] = page; | 945 root->global_empty_page_ring[currentIndex] = page; |
| 940 page->emptyCacheIndex = currentIndex; | 946 page->empty_cache_index = currentIndex; |
| 941 ++currentIndex; | 947 ++currentIndex; |
| 942 if (currentIndex == kMaxFreeableSpans) | 948 if (currentIndex == kMaxFreeableSpans) |
| 943 currentIndex = 0; | 949 currentIndex = 0; |
| 944 root->globalEmptyPageRingIndex = currentIndex; | 950 root->global_empty_page_ring_index = currentIndex; |
| 945 } | 951 } |
| 946 | 952 |
| 947 static void partitionDecommitEmptyPages(PartitionRootBase* root) { | 953 static void partitionDecommitEmptyPages(PartitionRootBase* root) { |
| 948 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 954 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 949 PartitionPage* page = root->globalEmptyPageRing[i]; | 955 PartitionPage* page = root->global_empty_page_ring[i]; |
| 950 if (page) | 956 if (page) |
| 951 partitionDecommitPageIfPossible(root, page); | 957 partitionDecommitPageIfPossible(root, page); |
| 952 root->globalEmptyPageRing[i] = nullptr; | 958 root->global_empty_page_ring[i] = nullptr; |
| 953 } | 959 } |
| 954 } | 960 } |
| 955 | 961 |
| 956 void partitionFreeSlowPath(PartitionPage* page) { | 962 void PartitionFreeSlowPath(PartitionPage* page) { |
| 957 PartitionBucket* bucket = page->bucket; | 963 PartitionBucket* bucket = page->bucket; |
| 958 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 964 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 959 if (LIKELY(page->numAllocatedSlots == 0)) { | 965 if (LIKELY(page->num_allocated_slots == 0)) { |
| 960 // Page became fully unused. | 966 // Page became fully unused. |
| 961 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 967 if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) { |
| 962 partitionDirectUnmap(page); | 968 partitionDirectUnmap(page); |
| 963 return; | 969 return; |
| 964 } | 970 } |
| 965 // If it's the current active page, change it. We bounce the page to | 971 // If it's the current active page, change it. We bounce the page to |
| 966 // the empty list as a force towards defragmentation. | 972 // the empty list as a force towards defragmentation. |
| 967 if (LIKELY(page == bucket->activePagesHead)) | 973 if (LIKELY(page == bucket->active_pages_head)) |
| 968 (void)partitionSetNewActivePage(bucket); | 974 (void)partitionSetNewActivePage(bucket); |
| 969 DCHECK(bucket->activePagesHead != page); | 975 DCHECK(bucket->active_pages_head != page); |
| 970 | 976 |
| 971 partitionPageSetRawSize(page, 0); | 977 partitionPageSetRawSize(page, 0); |
| 972 DCHECK(!partitionPageGetRawSize(page)); | 978 DCHECK(!PartitionPageGetRawSize(page)); |
| 973 | 979 |
| 974 partitionRegisterEmptyPage(page); | 980 partitionRegisterEmptyPage(page); |
| 975 } else { | 981 } else { |
| 976 DCHECK(!partitionBucketIsDirectMapped(bucket)); | 982 DCHECK(!PartitionBucketIsDirectMapped(bucket)); |
| 977 // Ensure that the page is full. That's the only valid case if we | 983 // Ensure that the page is full. That's the only valid case if we |
| 978 // arrive here. | 984 // arrive here. |
| 979 DCHECK(page->numAllocatedSlots < 0); | 985 DCHECK(page->num_allocated_slots < 0); |
| 980 // A transition of numAllocatedSlots from 0 to -1 is not legal, and | 986 // A transition of num_allocated_slots from 0 to -1 is not legal, and |
| 981 // likely indicates a double-free. | 987 // likely indicates a double-free. |
| 982 CHECK(page->numAllocatedSlots != -1); | 988 CHECK(page->num_allocated_slots != -1); |
| 983 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | 989 page->num_allocated_slots = -page->num_allocated_slots - 2; |
| 984 DCHECK(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); | 990 DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1); |
| 985 // Fully used page became partially used. It must be put back on the | 991 // Fully used page became partially used. It must be put back on the |
| 986 // non-full page list. Also make it the current page to increase the | 992 // non-full page list. Also make it the current page to increase the |
| 987 // chances of it being filled up again. The old current page will be | 993 // chances of it being filled up again. The old current page will be |
| 988 // the next page. | 994 // the next page. |
| 989 DCHECK(!page->nextPage); | 995 DCHECK(!page->next_page); |
| 990 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) | 996 if (LIKELY(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage)) |
| 991 page->nextPage = bucket->activePagesHead; | 997 page->next_page = bucket->active_pages_head; |
| 992 bucket->activePagesHead = page; | 998 bucket->active_pages_head = page; |
| 993 --bucket->numFullPages; | 999 --bucket->num_full_pages; |
| 994 // Special case: for a partition page with just a single slot, it may | 1000 // Special case: for a partition page with just a single slot, it may |
| 995 // now be empty and we want to run it through the empty logic. | 1001 // now be empty and we want to run it through the empty logic. |
| 996 if (UNLIKELY(page->numAllocatedSlots == 0)) | 1002 if (UNLIKELY(page->num_allocated_slots == 0)) |
| 997 partitionFreeSlowPath(page); | 1003 PartitionFreeSlowPath(page); |
| 998 } | 1004 } |
| 999 } | 1005 } |
| 1000 | 1006 |
| 1001 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, | 1007 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, |
| 1002 PartitionPage* page, | 1008 PartitionPage* page, |
| 1003 size_t rawSize) { | 1009 size_t raw_size) { |
| 1004 DCHECK(partitionBucketIsDirectMapped(page->bucket)); | 1010 DCHECK(PartitionBucketIsDirectMapped(page->bucket)); |
| 1005 | 1011 |
| 1006 rawSize = partitionCookieSizeAdjustAdd(rawSize); | 1012 raw_size = PartitionCookieSizeAdjustAdd(raw_size); |
| 1007 | 1013 |
| 1008 // Note that the new size might be a bucketed size; this function is called | 1014 // Note that the new size might be a bucketed size; this function is called |
| 1009 // whenever we're reallocating a direct mapped allocation. | 1015 // whenever we're reallocating a direct mapped allocation. |
| 1010 size_t newSize = partitionDirectMapSize(rawSize); | 1016 size_t new_size = PartitionDirectMapSize(raw_size); |
| 1011 if (newSize < kGenericMinDirectMappedDownsize) | 1017 if (new_size < kGenericMinDirectMappedDownsize) |
| 1012 return false; | 1018 return false; |
| 1013 | 1019 |
| 1014 // bucket->slotSize is the current size of the allocation. | 1020 // bucket->slot_size is the current size of the allocation. |
| 1015 size_t currentSize = page->bucket->slotSize; | 1021 size_t current_size = page->bucket->slot_size; |
| 1016 if (newSize == currentSize) | 1022 if (new_size == current_size) |
| 1017 return true; | 1023 return true; |
| 1018 | 1024 |
| 1019 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); | 1025 char* char_ptr = static_cast<char*>(PartitionPageToPointer(page)); |
| 1020 | 1026 |
| 1021 if (newSize < currentSize) { | 1027 if (new_size < current_size) { |
| 1022 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; | 1028 size_t map_size = partitionPageToDirectMapExtent(page)->map_size; |
| 1023 | 1029 |
| 1024 // Don't reallocate in-place if new size is less than 80 % of the full | 1030 // Don't reallocate in-place if new size is less than 80 % of the full |
| 1025 // map size, to avoid holding on to too much unused address space. | 1031 // map size, to avoid holding on to too much unused address space. |
| 1026 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) | 1032 if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4) |
| 1027 return false; | 1033 return false; |
| 1028 | 1034 |
| 1029 // Shrink by decommitting unneeded pages and making them inaccessible. | 1035 // Shrink by decommitting unneeded pages and making them inaccessible. |
| 1030 size_t decommitSize = currentSize - newSize; | 1036 size_t decommitSize = current_size - new_size; |
| 1031 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); | 1037 partitionDecommitSystemPages(root, char_ptr + new_size, decommitSize); |
| 1032 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | 1038 SetSystemPagesInaccessible(char_ptr + new_size, decommitSize); |
| 1033 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | 1039 } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) { |
| 1034 // Grow within the actually allocated memory. Just need to make the | 1040 // Grow within the actually allocated memory. Just need to make the |
| 1035 // pages accessible again. | 1041 // pages accessible again. |
| 1036 size_t recommitSize = newSize - currentSize; | 1042 size_t recommit_size = new_size - current_size; |
| 1037 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); | 1043 bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size); |
| 1038 CHECK(ret); | 1044 CHECK(ret); |
| 1039 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); | 1045 partitionRecommitSystemPages(root, char_ptr + current_size, recommit_size); |
| 1040 | 1046 |
| 1041 #if DCHECK_IS_ON() | 1047 #if DCHECK_IS_ON() |
| 1042 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 1048 memset(char_ptr + current_size, kUninitializedByte, recommit_size); |
| 1043 #endif | 1049 #endif |
| 1044 } else { | 1050 } else { |
| 1045 // We can't perform the realloc in-place. | 1051 // We can't perform the realloc in-place. |
| 1046 // TODO: support this too when possible. | 1052 // TODO: support this too when possible. |
| 1047 return false; | 1053 return false; |
| 1048 } | 1054 } |
| 1049 | 1055 |
| 1050 #if DCHECK_IS_ON() | 1056 #if DCHECK_IS_ON() |
| 1051 // Write a new trailing cookie. | 1057 // Write a new trailing cookie. |
| 1052 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); | 1058 PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize); |
| 1053 #endif | 1059 #endif |
| 1054 | 1060 |
| 1055 partitionPageSetRawSize(page, rawSize); | 1061 partitionPageSetRawSize(page, raw_size); |
| 1056 DCHECK(partitionPageGetRawSize(page) == rawSize); | 1062 DCHECK(PartitionPageGetRawSize(page) == raw_size); |
| 1057 | 1063 |
| 1058 page->bucket->slotSize = newSize; | 1064 page->bucket->slot_size = new_size; |
| 1059 return true; | 1065 return true; |
| 1060 } | 1066 } |
| 1061 | 1067 |
| 1062 void* partitionReallocGeneric(PartitionRootGeneric* root, | 1068 void* PartitionReallocGeneric(PartitionRootGeneric* root, |
| 1063 void* ptr, | 1069 void* ptr, |
| 1064 size_t newSize, | 1070 size_t new_size, |
| 1065 const char* typeName) { | 1071 const char* type_name) { |
| 1066 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 1072 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 1067 return realloc(ptr, newSize); | 1073 return realloc(ptr, new_size); |
| 1068 #else | 1074 #else |
| 1069 if (UNLIKELY(!ptr)) | 1075 if (UNLIKELY(!ptr)) |
| 1070 return partitionAllocGeneric(root, newSize, typeName); | 1076 return PartitionAllocGeneric(root, new_size, type_name); |
| 1071 if (UNLIKELY(!newSize)) { | 1077 if (UNLIKELY(!new_size)) { |
| 1072 partitionFreeGeneric(root, ptr); | 1078 PartitionFreeGeneric(root, ptr); |
| 1073 return 0; | 1079 return 0; |
| 1074 } | 1080 } |
| 1075 | 1081 |
| 1076 if (newSize > kGenericMaxDirectMapped) | 1082 if (new_size > kGenericMaxDirectMapped) |
| 1077 partitionExcessiveAllocationSize(); | 1083 partitionExcessiveAllocationSize(); |
| 1078 | 1084 |
| 1079 DCHECK(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); | 1085 DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr))); |
| 1080 | 1086 |
| 1081 PartitionPage* page = | 1087 PartitionPage* page = |
| 1082 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1088 PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); |
| 1083 | 1089 |
| 1084 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { | 1090 if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) { |
| 1085 // We may be able to perform the realloc in place by changing the | 1091 // We may be able to perform the realloc in place by changing the |
| 1086 // accessibility of memory pages and, if reducing the size, decommitting | 1092 // accessibility of memory pages and, if reducing the size, decommitting |
| 1087 // them. | 1093 // them. |
| 1088 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { | 1094 if (partitionReallocDirectMappedInPlace(root, page, new_size)) { |
| 1089 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); | 1095 PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name); |
| 1090 return ptr; | 1096 return ptr; |
| 1091 } | 1097 } |
| 1092 } | 1098 } |
| 1093 | 1099 |
| 1094 size_t actualNewSize = partitionAllocActualSize(root, newSize); | 1100 size_t actualNewSize = PartitionAllocActualSize(root, new_size); |
| 1095 size_t actualOldSize = partitionAllocGetSize(ptr); | 1101 size_t actualOldSize = PartitionAllocGetSize(ptr); |
| 1096 | 1102 |
| 1097 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the | 1103 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the |
| 1098 // new size is a significant percentage smaller. We could do the same if we | 1104 // new size is a significant percentage smaller. We could do the same if we |
| 1099 // determine it is a win. | 1105 // determine it is a win. |
| 1100 if (actualNewSize == actualOldSize) { | 1106 if (actualNewSize == actualOldSize) { |
| 1101 // Trying to allocate a block of size newSize would give us a block of | 1107 // Trying to allocate a block of size new_size would give us a block of |
| 1102 // the same size as the one we've already got, so no point in doing | 1108 // the same size as the one we've already got, so no point in doing |
| 1103 // anything here. | 1109 // anything here. |
| 1104 return ptr; | 1110 return ptr; |
| 1105 } | 1111 } |
| 1106 | 1112 |
| 1107 // This realloc cannot be resized in-place. Sadness. | 1113 // This realloc cannot be resized in-place. Sadness. |
| 1108 void* ret = partitionAllocGeneric(root, newSize, typeName); | 1114 void* ret = PartitionAllocGeneric(root, new_size, type_name); |
| 1109 size_t copySize = actualOldSize; | 1115 size_t copy_size = actualOldSize; |
| 1110 if (newSize < copySize) | 1116 if (new_size < copy_size) |
| 1111 copySize = newSize; | 1117 copy_size = new_size; |
| 1112 | 1118 |
| 1113 memcpy(ret, ptr, copySize); | 1119 memcpy(ret, ptr, copy_size); |
| 1114 partitionFreeGeneric(root, ptr); | 1120 PartitionFreeGeneric(root, ptr); |
| 1115 return ret; | 1121 return ret; |
| 1116 #endif | 1122 #endif |
| 1117 } | 1123 } |
| 1118 | 1124 |
| 1119 static size_t partitionPurgePage(PartitionPage* page, bool discard) { | 1125 static size_t PartitionPurgePage(PartitionPage* page, bool discard) { |
| 1120 const PartitionBucket* bucket = page->bucket; | 1126 const PartitionBucket* bucket = page->bucket; |
| 1121 size_t slotSize = bucket->slotSize; | 1127 size_t slot_size = bucket->slot_size; |
| 1122 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) | 1128 if (slot_size < kSystemPageSize || !page->num_allocated_slots) |
| 1123 return 0; | 1129 return 0; |
| 1124 | 1130 |
| 1125 size_t bucketNumSlots = partitionBucketSlots(bucket); | 1131 size_t bucket_num_slots = PartitionBucketSlots(bucket); |
| 1126 size_t discardableBytes = 0; | 1132 size_t discardable_bytes = 0; |
| 1127 | 1133 |
| 1128 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1134 size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1129 if (rawSize) { | 1135 if (raw_size) { |
| 1130 uint32_t usedBytes = static_cast<uint32_t>(roundUpToSystemPage(rawSize)); | 1136 uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size)); |
| 1131 discardableBytes = bucket->slotSize - usedBytes; | 1137 discardable_bytes = bucket->slot_size - usedBytes; |
| 1132 if (discardableBytes && discard) { | 1138 if (discardable_bytes && discard) { |
| 1133 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1139 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 1134 ptr += usedBytes; | 1140 ptr += usedBytes; |
| 1135 discardSystemPages(ptr, discardableBytes); | 1141 DiscardSystemPages(ptr, discardable_bytes); |
| 1136 } | 1142 } |
| 1137 return discardableBytes; | 1143 return discardable_bytes; |
| 1138 } | 1144 } |
| 1139 | 1145 |
| 1140 const size_t maxSlotCount = | 1146 const size_t maxSlotCount = |
| 1141 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; | 1147 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; |
| 1142 DCHECK(bucketNumSlots <= maxSlotCount); | 1148 DCHECK(bucket_num_slots <= maxSlotCount); |
| 1143 DCHECK(page->numUnprovisionedSlots < bucketNumSlots); | 1149 DCHECK(page->num_unprovisioned_slots < bucket_num_slots); |
| 1144 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; | 1150 size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots; |
| 1145 char slotUsage[maxSlotCount]; | 1151 char slotUsage[maxSlotCount]; |
| 1146 size_t lastSlot = static_cast<size_t>(-1); | 1152 size_t lastSlot = static_cast<size_t>(-1); |
| 1147 memset(slotUsage, 1, numSlots); | 1153 memset(slotUsage, 1, num_slots); |
| 1148 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1154 char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); |
| 1149 PartitionFreelistEntry* entry = page->freelistHead; | 1155 PartitionFreelistEntry* entry = page->freelist_head; |
| 1150 // First, walk the freelist for this page and make a bitmap of which slots | 1156 // First, walk the freelist for this page and make a bitmap of which slots |
| 1151 // are not in use. | 1157 // are not in use. |
| 1152 while (entry) { | 1158 while (entry) { |
| 1153 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; | 1159 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size; |
| 1154 DCHECK(slotIndex < numSlots); | 1160 DCHECK(slotIndex < num_slots); |
| 1155 slotUsage[slotIndex] = 0; | 1161 slotUsage[slotIndex] = 0; |
| 1156 entry = partitionFreelistMask(entry->next); | 1162 entry = PartitionFreelistMask(entry->next); |
| 1157 // If we have a slot where the masked freelist entry is 0, we can | 1163 // If we have a slot where the masked freelist entry is 0, we can |
| 1158 // actually discard that freelist entry because touching a discarded | 1164 // actually discard that freelist entry because touching a discarded |
| 1159 // page is guaranteed to return original content or 0. | 1165 // page is guaranteed to return original content or 0. |
| 1160 // (Note that this optimization won't fire on big endian machines | 1166 // (Note that this optimization won't fire on big endian machines |
| 1161 // because the masking function is negation.) | 1167 // because the masking function is negation.) |
| 1162 if (!partitionFreelistMask(entry)) | 1168 if (!PartitionFreelistMask(entry)) |
| 1163 lastSlot = slotIndex; | 1169 lastSlot = slotIndex; |
| 1164 } | 1170 } |
| 1165 | 1171 |
| 1166 // If the slot(s) at the end of the slot span are not in use, we can | 1172 // If the slot(s) at the end of the slot span are not in use, we can |
| 1167 // truncate them entirely and rewrite the freelist. | 1173 // truncate them entirely and rewrite the freelist. |
| 1168 size_t truncatedSlots = 0; | 1174 size_t truncatedSlots = 0; |
| 1169 while (!slotUsage[numSlots - 1]) { | 1175 while (!slotUsage[num_slots - 1]) { |
| 1170 truncatedSlots++; | 1176 truncatedSlots++; |
| 1171 numSlots--; | 1177 num_slots--; |
| 1172 DCHECK(numSlots); | 1178 DCHECK(num_slots); |
| 1173 } | 1179 } |
| 1174 // First, do the work of calculating the discardable bytes. Don't actually | 1180 // First, do the work of calculating the discardable bytes. Don't actually |
| 1175 // discard anything unless the discard flag was passed in. | 1181 // discard anything unless the discard flag was passed in. |
| 1176 char* beginPtr = nullptr; | 1182 char* beginPtr = nullptr; |
| 1177 char* endPtr = nullptr; | 1183 char* endPtr = nullptr; |
| 1178 size_t unprovisionedBytes = 0; | 1184 size_t unprovisionedBytes = 0; |
| 1179 if (truncatedSlots) { | 1185 if (truncatedSlots) { |
| 1180 beginPtr = ptr + (numSlots * slotSize); | 1186 beginPtr = ptr + (num_slots * slot_size); |
| 1181 endPtr = beginPtr + (slotSize * truncatedSlots); | 1187 endPtr = beginPtr + (slot_size * truncatedSlots); |
| 1182 beginPtr = reinterpret_cast<char*>( | 1188 beginPtr = reinterpret_cast<char*>( |
| 1183 roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1189 RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
| 1184 // We round the end pointer here up and not down because we're at the | 1190 // We round the end pointer here up and not down because we're at the |
| 1185 // end of a slot span, so we "own" all the way up to the page boundary. | 1191 // end of a slot span, so we "own" all the way up to the page boundary. |
| 1186 endPtr = reinterpret_cast<char*>( | 1192 endPtr = reinterpret_cast<char*>( |
| 1187 roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1193 RoundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); |
| 1188 DCHECK(endPtr <= ptr + partitionBucketBytes(bucket)); | 1194 DCHECK(endPtr <= ptr + PartitionBucketBytes(bucket)); |
| 1189 if (beginPtr < endPtr) { | 1195 if (beginPtr < endPtr) { |
| 1190 unprovisionedBytes = endPtr - beginPtr; | 1196 unprovisionedBytes = endPtr - beginPtr; |
| 1191 discardableBytes += unprovisionedBytes; | 1197 discardable_bytes += unprovisionedBytes; |
| 1192 } | 1198 } |
| 1193 } | 1199 } |
| 1194 if (unprovisionedBytes && discard) { | 1200 if (unprovisionedBytes && discard) { |
| 1195 DCHECK(truncatedSlots > 0); | 1201 DCHECK(truncatedSlots > 0); |
| 1196 size_t numNewEntries = 0; | 1202 size_t numNewEntries = 0; |
| 1197 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); | 1203 page->num_unprovisioned_slots += static_cast<uint16_t>(truncatedSlots); |
| 1198 // Rewrite the freelist. | 1204 // Rewrite the freelist. |
| 1199 PartitionFreelistEntry** entryPtr = &page->freelistHead; | 1205 PartitionFreelistEntry** entryPtr = &page->freelist_head; |
| 1200 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { | 1206 for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) { |
| 1201 if (slotUsage[slotIndex]) | 1207 if (slotUsage[slotIndex]) |
| 1202 continue; | 1208 continue; |
| 1203 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( | 1209 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( |
| 1204 ptr + (slotSize * slotIndex)); | 1210 ptr + (slot_size * slotIndex)); |
| 1205 *entryPtr = partitionFreelistMask(entry); | 1211 *entryPtr = PartitionFreelistMask(entry); |
| 1206 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); | 1212 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); |
| 1207 numNewEntries++; | 1213 numNewEntries++; |
| 1208 } | 1214 } |
| 1209 // Terminate the freelist chain. | 1215 // Terminate the freelist chain. |
| 1210 *entryPtr = nullptr; | 1216 *entryPtr = nullptr; |
| 1211 // The freelist head is stored unmasked. | 1217 // The freelist head is stored unmasked. |
| 1212 page->freelistHead = partitionFreelistMask(page->freelistHead); | 1218 page->freelist_head = PartitionFreelistMask(page->freelist_head); |
| 1213 DCHECK(numNewEntries == numSlots - page->numAllocatedSlots); | 1219 DCHECK(numNewEntries == num_slots - page->num_allocated_slots); |
| 1214 // Discard the memory. | 1220 // Discard the memory. |
| 1215 discardSystemPages(beginPtr, unprovisionedBytes); | 1221 DiscardSystemPages(beginPtr, unprovisionedBytes); |
| 1216 } | 1222 } |
| 1217 | 1223 |
| 1218 // Next, walk the slots and for any not in use, consider where the system | 1224 // Next, walk the slots and for any not in use, consider where the system |
| 1219 // page boundaries occur. We can release any system pages back to the | 1225 // page boundaries occur. We can release any system pages back to the |
| 1220 // system as long as we don't interfere with a freelist pointer or an | 1226 // system as long as we don't interfere with a freelist pointer or an |
| 1221 // adjacent slot. | 1227 // adjacent slot. |
| 1222 for (size_t i = 0; i < numSlots; ++i) { | 1228 for (size_t i = 0; i < num_slots; ++i) { |
| 1223 if (slotUsage[i]) | 1229 if (slotUsage[i]) |
| 1224 continue; | 1230 continue; |
| 1225 // The first address we can safely discard is just after the freelist | 1231 // The first address we can safely discard is just after the freelist |
| 1226 // pointer. There's one quirk: if the freelist pointer is actually a | 1232 // pointer. There's one quirk: if the freelist pointer is actually a |
| 1227 // null, we can discard that pointer value too. | 1233 // null, we can discard that pointer value too. |
| 1228 char* beginPtr = ptr + (i * slotSize); | 1234 char* beginPtr = ptr + (i * slot_size); |
| 1229 char* endPtr = beginPtr + slotSize; | 1235 char* endPtr = beginPtr + slot_size; |
| 1230 if (i != lastSlot) | 1236 if (i != lastSlot) |
| 1231 beginPtr += sizeof(PartitionFreelistEntry); | 1237 beginPtr += sizeof(PartitionFreelistEntry); |
| 1232 beginPtr = reinterpret_cast<char*>( | 1238 beginPtr = reinterpret_cast<char*>( |
| 1233 roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1239 RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
| 1234 endPtr = reinterpret_cast<char*>( | 1240 endPtr = reinterpret_cast<char*>( |
| 1235 roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1241 RoundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); |
| 1236 if (beginPtr < endPtr) { | 1242 if (beginPtr < endPtr) { |
| 1237 size_t partialSlotBytes = endPtr - beginPtr; | 1243 size_t partialSlotBytes = endPtr - beginPtr; |
| 1238 discardableBytes += partialSlotBytes; | 1244 discardable_bytes += partialSlotBytes; |
| 1239 if (discard) | 1245 if (discard) |
| 1240 discardSystemPages(beginPtr, partialSlotBytes); | 1246 DiscardSystemPages(beginPtr, partialSlotBytes); |
| 1241 } | 1247 } |
| 1242 } | 1248 } |
| 1243 return discardableBytes; | 1249 return discardable_bytes; |
| 1244 } | 1250 } |
| 1245 | 1251 |
| 1246 static void partitionPurgeBucket(PartitionBucket* bucket) { | 1252 static void partitionPurgeBucket(PartitionBucket* bucket) { |
| 1247 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1253 if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) { |
| 1248 for (PartitionPage* page = bucket->activePagesHead; page; | 1254 for (PartitionPage* page = bucket->active_pages_head; page; |
| 1249 page = page->nextPage) { | 1255 page = page->next_page) { |
| 1250 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 1256 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 1251 (void)partitionPurgePage(page, true); | 1257 (void)PartitionPurgePage(page, true); |
| 1252 } | 1258 } |
| 1253 } | 1259 } |
| 1254 } | 1260 } |
| 1255 | 1261 |
| 1256 void partitionPurgeMemory(PartitionRoot* root, int flags) { | 1262 void PartitionPurgeMemory(PartitionRoot* root, int flags) { |
| 1257 if (flags & PartitionPurgeDecommitEmptyPages) | 1263 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1258 partitionDecommitEmptyPages(root); | 1264 partitionDecommitEmptyPages(root); |
| 1259 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | 1265 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages |
| 1260 // here because that flag is only useful for allocations >= system page | 1266 // here because that flag is only useful for allocations >= system page |
| 1261 // size. We only have allocations that large inside generic partitions | 1267 // size. We only have allocations that large inside generic partitions |
| 1262 // at the moment. | 1268 // at the moment. |
| 1263 } | 1269 } |
| 1264 | 1270 |
| 1265 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { | 1271 void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { |
| 1266 subtle::SpinLock::Guard guard(root->lock); | 1272 subtle::SpinLock::Guard guard(root->lock); |
| 1267 if (flags & PartitionPurgeDecommitEmptyPages) | 1273 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1268 partitionDecommitEmptyPages(root); | 1274 partitionDecommitEmptyPages(root); |
| 1269 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | 1275 if (flags & PartitionPurgeDiscardUnusedSystemPages) { |
| 1270 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1276 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1271 PartitionBucket* bucket = &root->buckets[i]; | 1277 PartitionBucket* bucket = &root->buckets[i]; |
| 1272 if (bucket->slotSize >= kSystemPageSize) | 1278 if (bucket->slot_size >= kSystemPageSize) |
| 1273 partitionPurgeBucket(bucket); | 1279 partitionPurgeBucket(bucket); |
| 1274 } | 1280 } |
| 1275 } | 1281 } |
| 1276 } | 1282 } |
| 1277 | 1283 |
| 1278 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, | 1284 static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out, |
| 1279 const PartitionPage* page) { | 1285 const PartitionPage* page) { |
| 1280 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1286 uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket); |
| 1281 | 1287 |
| 1282 if (partitionPageStateIsDecommitted(page)) { | 1288 if (PartitionPageStateIsDecommitted(page)) { |
| 1283 ++statsOut->numDecommittedPages; | 1289 ++stats_out->num_decommitted_pages; |
| 1284 return; | 1290 return; |
| 1285 } | 1291 } |
| 1286 | 1292 |
| 1287 statsOut->discardableBytes += | 1293 stats_out->discardable_bytes += |
| 1288 partitionPurgePage(const_cast<PartitionPage*>(page), false); | 1294 PartitionPurgePage(const_cast<PartitionPage*>(page), false); |
| 1289 | 1295 |
| 1290 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1296 size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1291 if (rawSize) | 1297 if (raw_size) |
| 1292 statsOut->activeBytes += static_cast<uint32_t>(rawSize); | 1298 stats_out->active_bytes += static_cast<uint32_t>(raw_size); |
| 1293 else | 1299 else |
| 1294 statsOut->activeBytes += | 1300 stats_out->active_bytes += |
| 1295 (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1301 (page->num_allocated_slots * stats_out->bucket_slot_size); |
| 1296 | 1302 |
| 1297 size_t pageBytesResident = | 1303 size_t page_bytes_resident = |
| 1298 roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * | 1304 RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) * |
| 1299 statsOut->bucketSlotSize); | 1305 stats_out->bucket_slot_size); |
| 1300 statsOut->residentBytes += pageBytesResident; | 1306 stats_out->resident_bytes += page_bytes_resident; |
| 1301 if (partitionPageStateIsEmpty(page)) { | 1307 if (PartitionPageStateIsEmpty(page)) { |
| 1302 statsOut->decommittableBytes += pageBytesResident; | 1308 stats_out->decommittable_bytes += page_bytes_resident; |
| 1303 ++statsOut->numEmptyPages; | 1309 ++stats_out->num_empty_pages; |
| 1304 } else if (partitionPageStateIsFull(page)) { | 1310 } else if (PartitionPageStateIsFull(page)) { |
| 1305 ++statsOut->numFullPages; | 1311 ++stats_out->num_full_pages; |
| 1306 } else { | 1312 } else { |
| 1307 DCHECK(partitionPageStateIsActive(page)); | 1313 DCHECK(PartitionPageStateIsActive(page)); |
| 1308 ++statsOut->numActivePages; | 1314 ++stats_out->num_active_pages; |
| 1309 } | 1315 } |
| 1310 } | 1316 } |
| 1311 | 1317 |
| 1312 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, | 1318 static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, |
| 1313 const PartitionBucket* bucket) { | 1319 const PartitionBucket* bucket) { |
| 1314 DCHECK(!partitionBucketIsDirectMapped(bucket)); | 1320 DCHECK(!PartitionBucketIsDirectMapped(bucket)); |
| 1315 statsOut->isValid = false; | 1321 stats_out->is_valid = false; |
| 1316 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | 1322 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), |
| 1317 // the bucket might still need to be reported if it has a list of empty, | 1323 // the bucket might still need to be reported if it has a list of empty, |
| 1318 // decommitted or full pages. | 1324 // decommitted or full pages. |
| 1319 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && | 1325 if (bucket->active_pages_head == &PartitionRootGeneric::gSeedPage && |
| 1320 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && | 1326 !bucket->empty_pages_head && !bucket->decommitted_pages_head && |
| 1321 !bucket->numFullPages) | 1327 !bucket->num_full_pages) |
| 1322 return; | 1328 return; |
| 1323 | 1329 |
| 1324 memset(statsOut, '\0', sizeof(*statsOut)); | 1330 memset(stats_out, '\0', sizeof(*stats_out)); |
| 1325 statsOut->isValid = true; | 1331 stats_out->is_valid = true; |
| 1326 statsOut->isDirectMap = false; | 1332 stats_out->is_direct_map = false; |
| 1327 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); | 1333 stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages); |
| 1328 statsOut->bucketSlotSize = bucket->slotSize; | 1334 stats_out->bucket_slot_size = bucket->slot_size; |
| 1329 uint16_t bucketNumSlots = partitionBucketSlots(bucket); | 1335 uint16_t bucket_num_slots = PartitionBucketSlots(bucket); |
| 1330 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; | 1336 size_t bucketUsefulStorage = stats_out->bucket_slot_size * bucket_num_slots; |
| 1331 statsOut->allocatedPageSize = partitionBucketBytes(bucket); | 1337 stats_out->allocated_page_size = PartitionBucketBytes(bucket); |
| 1332 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; | 1338 stats_out->active_bytes = bucket->num_full_pages * bucketUsefulStorage; |
| 1333 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; | 1339 stats_out->resident_bytes = |
| 1340 bucket->num_full_pages * stats_out->allocated_page_size; |
| 1334 | 1341 |
| 1335 for (const PartitionPage* page = bucket->emptyPagesHead; page; | 1342 for (const PartitionPage* page = bucket->empty_pages_head; page; |
| 1336 page = page->nextPage) { | 1343 page = page->next_page) { |
| 1337 DCHECK(partitionPageStateIsEmpty(page) || | 1344 DCHECK(PartitionPageStateIsEmpty(page) || |
| 1338 partitionPageStateIsDecommitted(page)); | 1345 PartitionPageStateIsDecommitted(page)); |
| 1339 partitionDumpPageStats(statsOut, page); | 1346 PartitionDumpPageStats(stats_out, page); |
| 1340 } | 1347 } |
| 1341 for (const PartitionPage* page = bucket->decommittedPagesHead; page; | 1348 for (const PartitionPage* page = bucket->decommitted_pages_head; page; |
| 1342 page = page->nextPage) { | 1349 page = page->next_page) { |
| 1343 DCHECK(partitionPageStateIsDecommitted(page)); | 1350 DCHECK(PartitionPageStateIsDecommitted(page)); |
| 1344 partitionDumpPageStats(statsOut, page); | 1351 PartitionDumpPageStats(stats_out, page); |
| 1345 } | 1352 } |
| 1346 | 1353 |
| 1347 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1354 if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) { |
| 1348 for (const PartitionPage* page = bucket->activePagesHead; page; | 1355 for (const PartitionPage* page = bucket->active_pages_head; page; |
| 1349 page = page->nextPage) { | 1356 page = page->next_page) { |
| 1350 DCHECK(page != &PartitionRootGeneric::gSeedPage); | 1357 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 1351 partitionDumpPageStats(statsOut, page); | 1358 PartitionDumpPageStats(stats_out, page); |
| 1352 } | 1359 } |
| 1353 } | 1360 } |
| 1354 } | 1361 } |
| 1355 | 1362 |
| 1356 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, | 1363 void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, |
| 1357 const char* partitionName, | 1364 const char* partition_name, |
| 1358 bool isLightDump, | 1365 bool is_light_dump, |
| 1359 PartitionStatsDumper* partitionStatsDumper) { | 1366 PartitionStatsDumper* dumper) { |
| 1360 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1367 PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets]; |
| 1361 static const size_t kMaxReportableDirectMaps = 4096; | 1368 static const size_t kMaxReportableDirectMaps = 4096; |
| 1362 uint32_t directMapLengths[kMaxReportableDirectMaps]; | 1369 uint32_t direct_map_lengths[kMaxReportableDirectMaps]; |
| 1363 size_t numDirectMappedAllocations = 0; | 1370 size_t num_direct_mapped_allocations = 0; |
| 1364 | 1371 |
| 1365 { | 1372 { |
| 1366 subtle::SpinLock::Guard guard(partition->lock); | 1373 subtle::SpinLock::Guard guard(partition->lock); |
| 1367 | 1374 |
| 1368 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1375 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1369 const PartitionBucket* bucket = &partition->buckets[i]; | 1376 const PartitionBucket* bucket = &partition->buckets[i]; |
| 1370 // Don't report the pseudo buckets that the generic allocator sets up in | 1377 // Don't report the pseudo buckets that the generic allocator sets up in |
| 1371 // order to preserve a fast size->bucket map (see | 1378 // order to preserve a fast size->bucket map (see |
| 1372 // partitionAllocGenericInit for details). | 1379 // PartitionAllocGenericInit for details). |
| 1373 if (!bucket->activePagesHead) | 1380 if (!bucket->active_pages_head) |
| 1374 bucketStats[i].isValid = false; | 1381 bucket_stats[i].is_valid = false; |
| 1375 else | 1382 else |
| 1376 partitionDumpBucketStats(&bucketStats[i], bucket); | 1383 PartitionDumpBucketStats(&bucket_stats[i], bucket); |
| 1377 } | 1384 } |
| 1378 | 1385 |
| 1379 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; | 1386 for (PartitionDirectMapExtent* extent = partition->direct_map_list; extent; |
| 1380 extent = extent->nextExtent) { | 1387 extent = extent->next_extent) { |
| 1381 DCHECK(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | 1388 DCHECK(!extent->next_extent || |
| 1382 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | 1389 extent->next_extent->prev_extent == extent); |
| 1383 ++numDirectMappedAllocations; | 1390 direct_map_lengths[num_direct_mapped_allocations] = |
| 1384 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | 1391 extent->bucket->slot_size; |
| 1392 ++num_direct_mapped_allocations; |
| 1393 if (num_direct_mapped_allocations == kMaxReportableDirectMaps) |
| 1385 break; | 1394 break; |
| 1386 } | 1395 } |
| 1387 } | 1396 } |
| 1388 | 1397 |
| 1389 // partitionsDumpBucketStats is called after collecting stats because it | 1398 // Call |PartitionsDumpBucketStats| after collecting stats because it can try |
| 1390 // can try to allocate using PartitionAllocGeneric and it can't obtain the | 1399 // to allocate using |PartitionAllocGeneric| and it can't obtain the lock. |
| 1391 // lock. | 1400 PartitionMemoryStats stats = {0}; |
| 1392 PartitionMemoryStats partitionStats = {0}; | 1401 stats.total_mmapped_bytes = partition->total_size_of_super_pages + |
| 1393 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + | 1402 partition->total_size_of_direct_mapped_pages; |
| 1394 partition->totalSizeOfDirectMappedPages; | 1403 stats.total_committed_bytes = partition->total_size_of_committed_pages; |
| 1395 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | |
| 1396 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1404 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1397 if (bucketStats[i].isValid) { | 1405 if (bucket_stats[i].is_valid) { |
| 1398 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; | 1406 stats.total_resident_bytes += bucket_stats[i].resident_bytes; |
| 1399 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; | 1407 stats.total_active_bytes += bucket_stats[i].active_bytes; |
| 1400 partitionStats.totalDecommittableBytes += | 1408 stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes; |
| 1401 bucketStats[i].decommittableBytes; | 1409 stats.total_discardable_bytes += bucket_stats[i].discardable_bytes; |
| 1402 partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; | 1410 if (!is_light_dump) |
| 1403 if (!isLightDump) | 1411 dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]); |
| 1404 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | |
| 1405 &bucketStats[i]); | |
| 1406 } | 1412 } |
| 1407 } | 1413 } |
| 1408 | 1414 |
| 1409 size_t directMappedAllocationsTotalSize = 0; | 1415 size_t direct_mapped_allocations_total_size = 0; |
| 1410 for (size_t i = 0; i < numDirectMappedAllocations; ++i) { | 1416 for (size_t i = 0; i < num_direct_mapped_allocations; ++i) { |
| 1411 uint32_t size = directMapLengths[i]; | 1417 uint32_t size = direct_map_lengths[i]; |
| 1412 directMappedAllocationsTotalSize += size; | 1418 direct_mapped_allocations_total_size += size; |
| 1413 if (isLightDump) | 1419 if (is_light_dump) |
| 1414 continue; | 1420 continue; |
| 1415 | 1421 |
| 1416 PartitionBucketMemoryStats stats; | 1422 PartitionBucketMemoryStats stats; |
| 1417 memset(&stats, '\0', sizeof(stats)); | 1423 memset(&stats, '\0', sizeof(stats)); |
| 1418 stats.isValid = true; | 1424 stats.is_valid = true; |
| 1419 stats.isDirectMap = true; | 1425 stats.is_direct_map = true; |
| 1420 stats.numFullPages = 1; | 1426 stats.num_full_pages = 1; |
| 1421 stats.allocatedPageSize = size; | 1427 stats.allocated_page_size = size; |
| 1422 stats.bucketSlotSize = size; | 1428 stats.bucket_slot_size = size; |
| 1423 stats.activeBytes = size; | 1429 stats.active_bytes = size; |
| 1424 stats.residentBytes = size; | 1430 stats.resident_bytes = size; |
| 1425 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); | 1431 dumper->PartitionsDumpBucketStats(partition_name, &stats); |
| 1426 } | 1432 } |
| 1427 partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; | 1433 stats.total_resident_bytes += direct_mapped_allocations_total_size; |
| 1428 partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; | 1434 stats.total_active_bytes += direct_mapped_allocations_total_size; |
| 1429 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1435 dumper->PartitionDumpTotals(partition_name, &stats); |
| 1430 } | 1436 } |
| 1431 | 1437 |
| 1432 void partitionDumpStats(PartitionRoot* partition, | 1438 void PartitionDumpStats(PartitionRoot* partition, |
| 1433 const char* partitionName, | 1439 const char* partition_name, |
| 1434 bool isLightDump, | 1440 bool is_light_dump, |
| 1435 PartitionStatsDumper* partitionStatsDumper) { | 1441 PartitionStatsDumper* dumper) { |
| 1436 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); | 1442 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); |
| 1437 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; | 1443 PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets]; |
| 1438 const size_t partitionNumBuckets = partition->numBuckets; | 1444 const size_t partitionNumBuckets = partition->num_buckets; |
| 1439 DCHECK(partitionNumBuckets <= kMaxReportableBuckets); | 1445 DCHECK(partitionNumBuckets <= kMaxReportableBuckets); |
| 1440 | 1446 |
| 1441 for (size_t i = 0; i < partitionNumBuckets; ++i) | 1447 for (size_t i = 0; i < partitionNumBuckets; ++i) |
| 1442 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); | 1448 PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]); |
| 1443 | 1449 |
| 1444 // partitionsDumpBucketStats is called after collecting stats because it | 1450 // PartitionsDumpBucketStats is called after collecting stats because it |
| 1445 // can use PartitionAlloc to allocate and this can affect the statistics. | 1451 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1446 PartitionMemoryStats partitionStats = {0}; | 1452 PartitionMemoryStats stats = {0}; |
| 1447 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; | 1453 stats.total_mmapped_bytes = partition->total_size_of_super_pages; |
| 1448 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | 1454 stats.total_committed_bytes = partition->total_size_of_committed_pages; |
| 1449 DCHECK(!partition->totalSizeOfDirectMappedPages); | 1455 DCHECK(!partition->total_size_of_direct_mapped_pages); |
| 1450 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1456 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1451 if (memoryStats[i].isValid) { | 1457 if (memory_stats[i].is_valid) { |
| 1452 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; | 1458 stats.total_resident_bytes += memory_stats[i].resident_bytes; |
| 1453 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; | 1459 stats.total_active_bytes += memory_stats[i].active_bytes; |
| 1454 partitionStats.totalDecommittableBytes += | 1460 stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes; |
| 1455 memoryStats[i].decommittableBytes; | 1461 stats.total_discardable_bytes += memory_stats[i].discardable_bytes; |
| 1456 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1462 if (!is_light_dump) |
| 1457 if (!isLightDump) | 1463 dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]); |
| 1458 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | |
| 1459 &memoryStats[i]); | |
| 1460 } | 1464 } |
| 1461 } | 1465 } |
| 1462 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1466 dumper->PartitionDumpTotals(partition_name, &stats); |
| 1463 } | 1467 } |
| 1464 | 1468 |
| 1465 } // namespace base | 1469 } // namespace base |
| OLD | NEW |