| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H | 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
| 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H | 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
| 7 | 7 |
| 8 // DESCRIPTION | 8 // DESCRIPTION |
| 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / | 9 // PartitionAlloc() / PartitionAllocGeneric() and PartitionFree() / |
| 10 // partitionFreeGeneric() are approximately analagous to malloc() and free(). | 10 // PartitionFreeGeneric() are approximately analogous to malloc() and free(). |
| 11 // | 11 // |
| 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object | 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object |
| 13 // must be supplied to these functions, representing a specific "heap partition" | 13 // must be supplied to these functions, representing a specific "heap partition" |
| 14 // that will be used to satisfy the allocation. Different partitions are | 14 // that will be used to satisfy the allocation. Different partitions are |
| 15 // guaranteed to exist in separate address spaces, including being separate from | 15 // guaranteed to exist in separate address spaces, including being separate from |
| 16 // the main system heap. If the contained objects are all freed, physical memory | 16 // the main system heap. If the contained objects are all freed, physical memory |
| 17 // is returned to the system but the address space remains reserved. | 17 // is returned to the system but the address space remains reserved. |
| 18 // See PartitionAlloc.md for other security properties PartitionAlloc provides. | 18 // See PartitionAlloc.md for other security properties PartitionAlloc provides. |
| 19 // | 19 // |
| 20 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE | 20 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE |
| 21 // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To | 21 // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To |
| 22 // minimize the instruction count to the fullest extent possible, the | 22 // minimize the instruction count to the fullest extent possible, the |
| 23 // PartitionRoot is really just a header adjacent to other data areas provided | 23 // PartitionRoot is really just a header adjacent to other data areas provided |
| 24 // by the allocator class. | 24 // by the allocator class. |
| 25 // | 25 // |
| 26 // The partitionAlloc() variant of the API has the following caveats: | 26 // The PartitionAlloc() variant of the API has the following caveats: |
| 27 // - Allocations and frees against a single partition must be single threaded. | 27 // - Allocations and frees against a single partition must be single threaded. |
| 28 // - Allocations must not exceed a max size, chosen at compile-time via a | 28 // - Allocations must not exceed a max size, chosen at compile-time via a |
| 29 // templated parameter to PartitionAllocator. | 29 // templated parameter to PartitionAllocator. |
| 30 // - Allocation sizes must be aligned to the system pointer size. | 30 // - Allocation sizes must be aligned to the system pointer size. |
| 31 // - Allocations are bucketed exactly according to size. | 31 // - Allocations are bucketed exactly according to size. |
| 32 // | 32 // |
| 33 // And for partitionAllocGeneric(): | 33 // And for PartitionAllocGeneric(): |
| 34 // - Multi-threaded use against a single partition is ok; locking is handled. | 34 // - Multi-threaded use against a single partition is ok; locking is handled. |
| 35 // - Allocations of any arbitrary size can be handled (subject to a limit of | 35 // - Allocations of any arbitrary size can be handled (subject to a limit of |
| 36 // INT_MAX bytes for security reasons). | 36 // INT_MAX bytes for security reasons). |
| 37 // - Bucketing is by approximate size, for example an allocation of 4000 bytes | 37 // - Bucketing is by approximate size, for example an allocation of 4000 bytes |
| 38 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and | 38 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and |
| 39 // keep worst-case waste to ~10%. | 39 // keep worst-case waste to ~10%. |
| 40 // | 40 // |
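For orientation, here is a minimal usage sketch of the generic variant. The `init()` and `root()` accessors on `PartitionAllocatorGeneric` are assumptions based on the "THE ONLY LEGITIMATE WAY" note above; the class itself is declared outside this header.

```cpp
#include "base/allocator/partition_allocator/partition_alloc.h"

// Assumed accessors: init() sets up the PartitionRootGeneric, root() returns it.
static PartitionAllocatorGeneric g_allocator;

void GenericExample() {
  g_allocator.init();
  // Per the bucketing notes above, 4000 bytes lands in a ~4096-byte bucket.
  void* p = PartitionAllocGeneric(g_allocator.root(), 4000, "ExampleType");
  PartitionFreeGeneric(g_allocator.root(), p);
}
```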
| 41 // The allocators are designed to be extremely fast, thanks to the following | 41 // The allocators are designed to be extremely fast, thanks to the following |
| 42 // properties and design: | 42 // properties and design: |
| 43 // - Just two single (reasonably predicatable) branches in the hot / fast path | 43 // - Just two single (reasonably predictable) branches in the hot / fast path |
| (...skipping 40 matching lines...) |
| 84 // for a partition page to be based on multiple system pages. Most references to | 84 // for a partition page to be based on multiple system pages. Most references to |
| 85 // "page" refer to partition pages. | 85 // "page" refer to partition pages. |
| 86 // We also have the concept of "super pages" -- these are the underlying system | 86 // We also have the concept of "super pages" -- these are the underlying system |
| 87 // allocations we make. Super pages contain multiple partition pages inside them | 87 // allocations we make. Super pages contain multiple partition pages inside them |
| 88 // and include space for a small amount of metadata per partition page. | 88 // and include space for a small amount of metadata per partition page. |
| 89 // Inside super pages, we store "slot spans". A slot span is a continguous range | 89 // Inside super pages, we store "slot spans". A slot span is a contiguous range |
| 90 // of one or more partition pages that stores allocations of the same size. | 90 // of one or more partition pages that stores allocations of the same size. |
| 91 // Slot span sizes are adjusted depending on the allocation size, to make sure | 91 // Slot span sizes are adjusted depending on the allocation size, to make sure |
| 92 // the packing does not lead to unused (wasted) space at the end of the last | 92 // the packing does not lead to unused (wasted) space at the end of the last |
| 93 // system page of the span. For our current max slot span size of 64k and other | 93 // system page of the span. For our current max slot span size of 64k and other |
| 94 // constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up | 94 // constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up |
| 95 // against the end of a system page. | 95 // against the end of a system page. |
| 96 static const size_t kPartitionPageShift = 14; // 16KB | 96 static const size_t kPartitionPageShift = 14; // 16KB |
| 97 static const size_t kPartitionPageSize = 1 << kPartitionPageShift; | 97 static const size_t kPartitionPageSize = 1 << kPartitionPageShift; |
| 98 static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1; | 98 static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1; |
| 99 static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask; | 99 static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask; |
| 100 static const size_t kMaxPartitionPagesPerSlotSpan = 4; | 100 static const size_t kMaxPartitionPagesPerSlotSpan = 4; |
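The constants above pin down the partition-page geometry; a couple of compile-time checks make the arithmetic concrete:

```cpp
// Values follow directly from the definitions above (illustration only).
static_assert(kPartitionPageSize == 16384, "1 << 14 is 16KB");
static_assert(kPartitionPageOffsetMask == 0x3FFF, "low 14 bits of an address");
// For any address p inside a partition page:
//   p & kPartitionPageBaseMask   -> start of the partition page
//   p & kPartitionPageOffsetMask -> offset within the partition page
```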
| 101 | 101 |
| 102 // To avoid fragmentation via never-used freelist entries, we hand out partition | 102 // To avoid fragmentation via never-used freelist entries, we hand out partition |
| 103 // freelist sections gradually, in units of the dominant system page size. | 103 // freelist sections gradually, in units of the dominant system page size. |
| 104 // What we're actually doing is avoiding filling the full partition page (16 KB) | 104 // What we're actually doing is avoiding filling the full partition page (16 KB) |
| (...skipping 87 matching lines...) |
| 192 << (kGenericMinBucketedOrder - 1); | 192 << (kGenericMinBucketedOrder - 1); |
| 193 static const size_t kGenericMaxBucketSpacing = | 193 static const size_t kGenericMaxBucketSpacing = |
| 194 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); | 194 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); |
| 195 static const size_t kGenericMaxBucketed = | 195 static const size_t kGenericMaxBucketed = |
| 196 (1 << (kGenericMaxBucketedOrder - 1)) + | 196 (1 << (kGenericMaxBucketedOrder - 1)) + |
| 197 ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); | 197 ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); |
| 198 static const size_t kGenericMinDirectMappedDownsize = | 198 static const size_t kGenericMinDirectMappedDownsize = |
| 199 kGenericMaxBucketed + | 199 kGenericMaxBucketed + |
| 200 1; // Limit when downsizing a direct mapping using realloc(). | 200 1; // Limit when downsizing a direct mapping using realloc(). |
| 201 static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; | 201 static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; |
| 202 static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT; | 202 static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT; |
| 203 | 203 |
| 204 // Constants for the memory reclaim logic. | 204 // Constants for the memory reclaim logic. |
| 205 static const size_t kMaxFreeableSpans = 16; | 205 static const size_t kMaxFreeableSpans = 16; |
| 206 | 206 |
| 207 // If the total size in bytes of allocated but not committed pages exceeds this | 207 // If the total size in bytes of allocated but not committed pages exceeds this |
| 208 // value (probably it is a "out of virtual address space" crash), | 208 // value (probably it is an "out of virtual address space" crash), |
| 209 // a special crash stack trace is generated at |partitionOutOfMemory|. | 209 // a special crash stack trace is generated at |partitionOutOfMemory|. |
| 210 // This is to distinguish "out of virtual address space" from | 210 // This is to distinguish "out of virtual address space" from |
| 211 // "out of physical memory" in crash reports. | 211 // "out of physical memory" in crash reports. |
| 212 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB | 212 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB |
| (...skipping 35 matching lines...) |
| 248 // - free() will detect when a full page has a slot free()'d and immediately | 248 // - free() will detect when a full page has a slot free()'d and immediately |
| 249 // return the page to the head of the active list. | 249 // return the page to the head of the active list. |
| 250 // - free() will detect when a page is fully emptied. It _may_ add it to the | 250 // - free() will detect when a page is fully emptied. It _may_ add it to the |
| 251 // empty list or it _may_ leave it on the active list until a future list scan. | 251 // empty list or it _may_ leave it on the active list until a future list scan. |
| 252 // - malloc() _may_ scan the active page list in order to fulfil the request. | 252 // - malloc() _may_ scan the active page list in order to fulfil the request. |
| 253 // If it does this, full, empty and decommitted pages encountered will be | 253 // If it does this, full, empty and decommitted pages encountered will be |
| 254 // booted out of the active list. If there are no suitable active pages found, | 254 // booted out of the active list. If there are no suitable active pages found, |
| 255 // an empty or decommitted page (if one exists) will be pulled from the empty | 255 // an empty or decommitted page (if one exists) will be pulled from the empty |
| 256 // list on to the active list. | 256 // list on to the active list. |
| 257 struct PartitionPage { | 257 struct PartitionPage { |
| 258 PartitionFreelistEntry* freelistHead; | 258 PartitionFreelistEntry* freelist_head; |
| 259 PartitionPage* nextPage; | 259 PartitionPage* next_page; |
| 260 PartitionBucket* bucket; | 260 PartitionBucket* bucket; |
| 261 // Deliberately signed, 0 for empty or decommitted page, -n for full pages: | 261 // Deliberately signed, 0 for empty or decommitted page, -n for full pages: |
| 262 int16_t numAllocatedSlots; | 262 int16_t num_allocated_slots; |
| 263 uint16_t numUnprovisionedSlots; | 263 uint16_t num_unprovisioned_slots; |
| 264 uint16_t pageOffset; | 264 uint16_t page_offset; |
| 265 int16_t emptyCacheIndex; // -1 if not in the empty cache. | 265 int16_t empty_cache_index; // -1 if not in the empty cache. |
| 266 }; | 266 }; |
| 267 | 267 |
| 268 struct PartitionBucket { | 268 struct PartitionBucket { |
| 269 PartitionPage* activePagesHead; // Accessed most in hot path => goes first. | 269 PartitionPage* active_pages_head; // Accessed most in hot path => goes first. |
| 270 PartitionPage* emptyPagesHead; | 270 PartitionPage* empty_pages_head; |
| 271 PartitionPage* decommittedPagesHead; | 271 PartitionPage* decommitted_pages_head; |
| 272 uint32_t slotSize; | 272 uint32_t slot_size; |
| 273 unsigned numSystemPagesPerSlotSpan : 8; | 273 unsigned num_system_pages_per_slot_span : 8; |
| 274 unsigned numFullPages : 24; | 274 unsigned num_full_pages : 24; |
| 275 }; | 275 }; |
| 276 | 276 |
| 277 // An "extent" is a span of consecutive superpages. We link to the partition's | 277 // An "extent" is a span of consecutive superpages. We link to the partition's |
| 278 // next extent (if there is one) at the very start of a superpage's metadata | 278 // next extent (if there is one) at the very start of a superpage's metadata |
| 279 // area. | 279 // area. |
| 280 struct PartitionSuperPageExtentEntry { | 280 struct PartitionSuperPageExtentEntry { |
| 281 PartitionRootBase* root; | 281 PartitionRootBase* root; |
| 282 char* superPageBase; | 282 char* super_page_base; |
| 283 char* superPagesEnd; | 283 char* super_pages_end; |
| 284 PartitionSuperPageExtentEntry* next; | 284 PartitionSuperPageExtentEntry* next; |
| 285 }; | 285 }; |
| 286 | 286 |
| 287 struct PartitionDirectMapExtent { | 287 struct PartitionDirectMapExtent { |
| 288 PartitionDirectMapExtent* nextExtent; | 288 PartitionDirectMapExtent* next_extent; |
| 289 PartitionDirectMapExtent* prevExtent; | 289 PartitionDirectMapExtent* prev_extent; |
| 290 PartitionBucket* bucket; | 290 PartitionBucket* bucket; |
| 291 size_t mapSize; // Mapped size, not including guard pages and meta-data. | 291 size_t map_size; // Mapped size, not including guard pages and meta-data. |
| 292 }; | 292 }; |
| 293 | 293 |
| 294 struct BASE_EXPORT PartitionRootBase { | 294 struct BASE_EXPORT PartitionRootBase { |
| 295 size_t totalSizeOfCommittedPages; | 295 size_t total_size_of_committed_pages; |
| 296 size_t totalSizeOfSuperPages; | 296 size_t total_size_of_super_pages; |
| 297 size_t totalSizeOfDirectMappedPages; | 297 size_t total_size_of_direct_mapped_pages; |
| 298 // Invariant: totalSizeOfCommittedPages <= | 298 // Invariant: total_size_of_committed_pages <= |
| 299 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. | 299 // total_size_of_super_pages + |
| 300 unsigned numBuckets; | 300 // total_size_of_direct_mapped_pages. |
| 301 unsigned maxAllocation; | 301 unsigned num_buckets; |
| 302 unsigned max_allocation; |
| 302 bool initialized; | 303 bool initialized; |
| 303 char* nextSuperPage; | 304 char* next_super_page; |
| 304 char* nextPartitionPage; | 305 char* next_partition_page; |
| 305 char* nextPartitionPageEnd; | 306 char* next_partition_page_end; |
| 306 PartitionSuperPageExtentEntry* currentExtent; | 307 PartitionSuperPageExtentEntry* current_extent; |
| 307 PartitionSuperPageExtentEntry* firstExtent; | 308 PartitionSuperPageExtentEntry* first_extent; |
| 308 PartitionDirectMapExtent* directMapList; | 309 PartitionDirectMapExtent* direct_map_list; |
| 309 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; | 310 PartitionPage* global_empty_page_ring[kMaxFreeableSpans]; |
| 310 int16_t globalEmptyPageRingIndex; | 311 int16_t global_empty_page_ring_index; |
| 311 uintptr_t invertedSelf; | 312 uintptr_t inverted_self; |
| 312 | 313 |
| 313 static subtle::SpinLock gInitializedLock; | 314 static subtle::SpinLock gInitializedLock; |
| 314 static bool gInitialized; | 315 static bool gInitialized; |
| 315 // gSeedPage is used as a sentinel to indicate that there is no page | 316 // gSeedPage is used as a sentinel to indicate that there is no page |
| 316 // in the active page list. We can use nullptr, but in that case we need | 317 // in the active page list. We can use nullptr, but in that case we need |
| 317 // to add a null-check branch to the hot allocation path. We want to avoid | 318 // to add a null-check branch to the hot allocation path. We want to avoid |
| 318 // that. | 319 // that. |
| 319 static PartitionPage gSeedPage; | 320 static PartitionPage gSeedPage; |
| 320 static PartitionBucket gPagedBucket; | 321 static PartitionBucket gPagedBucket; |
| 321 // gOomHandlingFunction is invoked when ParitionAlloc hits OutOfMemory. | 322 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. |
| 322 static void (*gOomHandlingFunction)(); | 323 static void (*gOomHandlingFunction)(); |
| 323 }; | 324 }; |
| 324 | 325 |
| 325 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. | 326 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. |
| 326 struct PartitionRoot : public PartitionRootBase { | 327 struct PartitionRoot : public PartitionRootBase { |
| 327 // The PartitionAlloc templated class ensures the following is correct. | 328 // The PartitionAlloc templated class ensures the following is correct. |
| 328 ALWAYS_INLINE PartitionBucket* buckets() { | 329 ALWAYS_INLINE PartitionBucket* buckets() { |
| 329 return reinterpret_cast<PartitionBucket*>(this + 1); | 330 return reinterpret_cast<PartitionBucket*>(this + 1); |
| 330 } | 331 } |
| 331 ALWAYS_INLINE const PartitionBucket* buckets() const { | 332 ALWAYS_INLINE const PartitionBucket* buckets() const { |
| 332 return reinterpret_cast<const PartitionBucket*>(this + 1); | 333 return reinterpret_cast<const PartitionBucket*>(this + 1); |
| 333 } | 334 } |
| 334 }; | 335 }; |
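The `this + 1` arithmetic works because the allocator classes place the bucket array immediately after the root header, per the "header adjacent to other data areas" note at the top of this file:

```cpp
// Layout provided by SizeSpecificPartitionAllocator (sketch):
//
//   [ PartitionRoot ][ PartitionBucket 0 ][ PartitionBucket 1 ] ...
//   ^ this           ^ this + 1 == buckets()
```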
| 335 | 336 |
| 336 // Never instantiate a PartitionRootGeneric directly, instead use | 337 // Never instantiate a PartitionRootGeneric directly, instead use |
| 337 // PartitionAllocatorGeneric. | 338 // PartitionAllocatorGeneric. |
| 338 struct PartitionRootGeneric : public PartitionRootBase { | 339 struct PartitionRootGeneric : public PartitionRootBase { |
| 339 subtle::SpinLock lock; | 340 subtle::SpinLock lock; |
| 340 // Some pre-computed constants. | 341 // Some pre-computed constants. |
| 341 size_t orderIndexShifts[kBitsPerSizet + 1]; | 342 size_t order_index_shifts[kBitsPerSizeT + 1]; |
| 342 size_t orderSubIndexMasks[kBitsPerSizet + 1]; | 343 size_t order_sub_index_masks[kBitsPerSizeT + 1]; |
| 343 // The bucket lookup table lets us map a size_t to a bucket quickly. | 344 // The bucket lookup table lets us map a size_t to a bucket quickly. |
| 344 // The trailing +1 caters for the overflow case for very large allocation | 345 // The trailing +1 caters for the overflow case for very large allocation |
| 345 // sizes. It is one flat array instead of a 2D array because in the 2D | 346 // sizes. It is one flat array instead of a 2D array because in the 2D |
| 346 // world, we'd need to index array[blah][max+1] which risks undefined | 347 // world, we'd need to index array[blah][max+1] which risks undefined |
| 347 // behavior. | 348 // behavior. |
| 348 PartitionBucket* | 349 PartitionBucket* |
| 349 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; | 350 bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1]; |
| 350 PartitionBucket buckets[kGenericNumBuckets]; | 351 PartitionBucket buckets[kGenericNumBuckets]; |
| 351 }; | 352 }; |
| 352 | 353 |
| 353 // Flags for partitionAllocGenericFlags. | 354 // Flags for PartitionAllocGenericFlags. |
| 354 enum PartitionAllocFlags { | 355 enum PartitionAllocFlags { |
| 355 PartitionAllocReturnNull = 1 << 0, | 356 PartitionAllocReturnNull = 1 << 0, |
| 356 }; | 357 }; |
| 357 | 358 |
| 358 // Struct used to retrieve total memory usage of a partition. Used by | 359 // Struct used to retrieve total memory usage of a partition. Used by |
| 359 // PartitionStatsDumper implementation. | 360 // PartitionStatsDumper implementation. |
| 360 struct PartitionMemoryStats { | 361 struct PartitionMemoryStats { |
| 361 size_t totalMmappedBytes; // Total bytes mmaped from the system. | 362 size_t total_mmapped_bytes; // Total bytes mmapped from the system. |
| 362 size_t totalCommittedBytes; // Total size of commmitted pages. | 363 size_t total_committed_bytes; // Total size of committed pages. |
| 363 size_t totalResidentBytes; // Total bytes provisioned by the partition. | 364 size_t total_resident_bytes; // Total bytes provisioned by the partition. |
| 364 size_t totalActiveBytes; // Total active bytes in the partition. | 365 size_t total_active_bytes; // Total active bytes in the partition. |
| 365 size_t totalDecommittableBytes; // Total bytes that could be decommitted. | 366 size_t total_decommittable_bytes; // Total bytes that could be decommitted. |
| 366 size_t totalDiscardableBytes; // Total bytes that could be discarded. | 367 size_t total_discardable_bytes; // Total bytes that could be discarded. |
| 367 }; | 368 }; |
| 368 | 369 |
| 369 // Struct used to retrieve memory statistics about a partition bucket. Used by | 370 // Struct used to retrieve memory statistics about a partition bucket. Used by |
| 370 // PartitionStatsDumper implementation. | 371 // PartitionStatsDumper implementation. |
| 371 struct PartitionBucketMemoryStats { | 372 struct PartitionBucketMemoryStats { |
| 372 bool isValid; // Used to check if the stats is valid. | 373 bool is_valid; // Used to check if the stats are valid. |
| 373 bool isDirectMap; // True if this is a direct mapping; size will not be | 374 bool is_direct_map; // True if this is a direct mapping; size will not be |
| 374 // unique. | 375 // unique. |
| 375 uint32_t bucketSlotSize; // The size of the slot in bytes. | 376 uint32_t bucket_slot_size; // The size of the slot in bytes. |
| 376 uint32_t allocatedPageSize; // Total size the partition page allocated from | 377 uint32_t allocated_page_size; // Total size the partition page allocated from |
| 377 // the system. | 378 // the system. |
| 378 uint32_t activeBytes; // Total active bytes used in the bucket. | 379 uint32_t active_bytes; // Total active bytes used in the bucket. |
| 379 uint32_t residentBytes; // Total bytes provisioned in the bucket. | 380 uint32_t resident_bytes; // Total bytes provisioned in the bucket. |
| 380 uint32_t decommittableBytes; // Total bytes that could be decommitted. | 381 uint32_t decommittable_bytes; // Total bytes that could be decommitted. |
| 381 uint32_t discardableBytes; // Total bytes that could be discarded. | 382 uint32_t discardable_bytes; // Total bytes that could be discarded. |
| 382 uint32_t numFullPages; // Number of pages with all slots allocated. | 383 uint32_t num_full_pages; // Number of pages with all slots allocated. |
| 383 uint32_t numActivePages; // Number of pages that have at least one | 384 uint32_t num_active_pages; // Number of pages that have at least one |
| 384 // provisioned slot. | 385 // provisioned slot. |
| 385 uint32_t numEmptyPages; // Number of pages that are empty | 386 uint32_t num_empty_pages; // Number of pages that are empty |
| 386 // but not decommitted. | 387 // but not decommitted. |
| 387 uint32_t numDecommittedPages; // Number of pages that are empty | 388 uint32_t num_decommitted_pages; // Number of pages that are empty |
| 388 // and decommitted. | 389 // and decommitted. |
| 389 }; | 390 }; |
| 390 | 391 |
| 391 // Interface that is passed to partitionDumpStats and | 392 // Interface that is passed to PartitionDumpStats and |
| 392 // partitionDumpStatsGeneric for using the memory statistics. | 393 // PartitionDumpStatsGeneric for using the memory statistics. |
| 393 class BASE_EXPORT PartitionStatsDumper { | 394 class BASE_EXPORT PartitionStatsDumper { |
| 394 public: | 395 public: |
| 395 // Called to dump total memory used by partition, once per partition. | 396 // Called to dump total memory used by partition, once per partition. |
| 396 virtual void partitionDumpTotals(const char* partitionName, | 397 virtual void PartitionDumpTotals(const char* partition_name, |
| 397 const PartitionMemoryStats*) = 0; | 398 const PartitionMemoryStats*) = 0; |
| 398 | 399 |
| 399 // Called to dump stats about buckets, for each bucket. | 400 // Called to dump stats about buckets, for each bucket. |
| 400 virtual void partitionsDumpBucketStats(const char* partitionName, | 401 virtual void PartitionsDumpBucketStats(const char* partition_name, |
| 401 const PartitionBucketMemoryStats*) = 0; | 402 const PartitionBucketMemoryStats*) = 0; |
| 402 }; | 403 }; |
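A minimal sketch of a dumper implementation; `SimpleStatsDumper` and the `LOG(INFO)` logging are illustrative, not part of this header:

```cpp
class SimpleStatsDumper : public PartitionStatsDumper {
 public:
  void PartitionDumpTotals(const char* partition_name,
                           const PartitionMemoryStats* stats) override {
    LOG(INFO) << partition_name
              << " committed bytes: " << stats->total_committed_bytes;
  }
  void PartitionsDumpBucketStats(
      const char* partition_name,
      const PartitionBucketMemoryStats* stats) override {
    if (!stats->is_valid)
      return;
    LOG(INFO) << partition_name
              << " bucket slot size: " << stats->bucket_slot_size;
  }
};
```

An instance would be passed to `PartitionDumpStatsGeneric()` (declared below) along with a partition name and the `is_light_dump` flag.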
| 403 | 404 |
| 404 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); | 405 BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)()); |
| 405 BASE_EXPORT void partitionAllocInit(PartitionRoot*, | 406 BASE_EXPORT void PartitionAllocInit(PartitionRoot*, |
| 406 size_t numBuckets, | 407 size_t num_buckets, |
| 407 size_t maxAllocation); | 408 size_t max_allocation); |
| 408 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*); | 409 BASE_EXPORT bool PartitionAllocShutdown(PartitionRoot*); |
| 409 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); | 410 BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*); |
| 410 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); | 411 BASE_EXPORT bool PartitionAllocGenericShutdown(PartitionRootGeneric*); |
| 411 | 412 |
| 412 enum PartitionPurgeFlags { | 413 enum PartitionPurgeFlags { |
| 413 // Decommitting the ring list of empty pages is reasonably fast. | 414 // Decommitting the ring list of empty pages is reasonably fast. |
| 414 PartitionPurgeDecommitEmptyPages = 1 << 0, | 415 PartitionPurgeDecommitEmptyPages = 1 << 0, |
| 415 // Discarding unused system pages is slower, because it involves walking all | 416 // Discarding unused system pages is slower, because it involves walking all |
| 416 // freelists in all active partition pages of all buckets >= system page | 417 // freelists in all active partition pages of all buckets >= system page |
| 417 // size. It often frees a similar amount of memory to decommitting the empty | 418 // size. It often frees a similar amount of memory to decommitting the empty |
| 418 // pages, though. | 419 // pages, though. |
| 419 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, | 420 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, |
| 420 }; | 421 }; |
| 421 | 422 |
| 422 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int); | 423 BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int); |
| 423 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); | 424 BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int); |
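For example, a full purge of a generic partition combines both flags (a sketch; `root` is assumed to be initialized):

```cpp
void PurgeAll(PartitionRootGeneric* root) {
  PartitionPurgeMemoryGeneric(root, PartitionPurgeDecommitEmptyPages |
                                        PartitionPurgeDiscardUnusedSystemPages);
}
```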
| 424 | 425 |
| 425 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*, | 426 BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*, |
| 426 int, | 427 int, |
| 427 size_t, | 428 size_t, |
| 428 PartitionBucket*); | 429 PartitionBucket*); |
| 429 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*); | 430 BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*); |
| 430 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*, | 431 BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*, |
| 431 void*, | 432 void*, |
| 432 size_t, | 433 size_t, |
| 433 const char* typeName); | 434 const char* type_name); |
| 434 | 435 |
| 435 BASE_EXPORT void partitionDumpStats(PartitionRoot*, | 436 BASE_EXPORT void PartitionDumpStats(PartitionRoot*, |
| 436 const char* partitionName, | 437 const char* partition_name, |
| 437 bool isLightDump, | 438 bool is_light_dump, |
| 438 PartitionStatsDumper*); | 439 PartitionStatsDumper*); |
| 439 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, | 440 BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*, |
| 440 const char* partitionName, | 441 const char* partition_name, |
| 441 bool isLightDump, | 442 bool is_light_dump, |
| 442 PartitionStatsDumper*); | 443 PartitionStatsDumper*); |
| 443 | 444 |
| 444 class BASE_EXPORT PartitionAllocHooks { | 445 class BASE_EXPORT PartitionAllocHooks { |
| 445 public: | 446 public: |
| 446 typedef void AllocationHook(void* address, size_t, const char* typeName); | 447 typedef void AllocationHook(void* address, size_t, const char* type_name); |
| 447 typedef void FreeHook(void* address); | 448 typedef void FreeHook(void* address); |
| 448 | 449 |
| 449 static void setAllocationHook(AllocationHook* hook) { | 450 static void SetAllocationHook(AllocationHook* hook) { |
| 450 m_allocationHook = hook; | 451 allocation_hook_ = hook; |
| 451 } | 452 } |
| 452 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } | 453 static void SetFreeHook(FreeHook* hook) { free_hook_ = hook; } |
| 453 | 454 |
| 454 static void allocationHookIfEnabled(void* address, | 455 static void AllocationHookIfEnabled(void* address, |
| 455 size_t size, | 456 size_t size, |
| 456 const char* typeName) { | 457 const char* type_name) { |
| 457 AllocationHook* allocationHook = m_allocationHook; | 458 AllocationHook* hook = allocation_hook_; |
| 458 if (UNLIKELY(allocationHook != nullptr)) | 459 if (UNLIKELY(hook != nullptr)) |
| 459 allocationHook(address, size, typeName); | 460 hook(address, size, type_name); |
| 460 } | 461 } |
| 461 | 462 |
| 462 static void freeHookIfEnabled(void* address) { | 463 static void FreeHookIfEnabled(void* address) { |
| 463 FreeHook* freeHook = m_freeHook; | 464 FreeHook* hook = free_hook_; |
| 464 if (UNLIKELY(freeHook != nullptr)) | 465 if (UNLIKELY(hook != nullptr)) |
| 465 freeHook(address); | 466 hook(address); |
| 466 } | 467 } |
| 467 | 468 |
| 468 static void reallocHookIfEnabled(void* oldAddress, | 469 static void ReallocHookIfEnabled(void* old_address, |
| 469 void* newAddress, | 470 void* new_address, |
| 470 size_t size, | 471 size_t size, |
| 471 const char* typeName) { | 472 const char* type_name) { |
| 472 // Report a reallocation as a free followed by an allocation. | 473 // Report a reallocation as a free followed by an allocation. |
| 473 AllocationHook* allocationHook = m_allocationHook; | 474 AllocationHook* allocation_hook = allocation_hook_; |
| 474 FreeHook* freeHook = m_freeHook; | 475 FreeHook* free_hook = free_hook_; |
| 475 if (UNLIKELY(allocationHook && freeHook)) { | 476 if (UNLIKELY(allocation_hook && free_hook)) { |
| 476 freeHook(oldAddress); | 477 free_hook(old_address); |
| 477 allocationHook(newAddress, size, typeName); | 478 allocation_hook(new_address, size, type_name); |
| 478 } | 479 } |
| 479 } | 480 } |
| 480 | 481 |
| 481 private: | 482 private: |
| 482 // Pointers to hook functions that PartitionAlloc will call on allocation and | 483 // Pointers to hook functions that PartitionAlloc will call on allocation and |
| 483 // free if the pointers are non-null. | 484 // free if the pointers are non-null. |
| 484 static AllocationHook* m_allocationHook; | 485 static AllocationHook* allocation_hook_; |
| 485 static FreeHook* m_freeHook; | 486 static FreeHook* free_hook_; |
| 486 }; | 487 }; |
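Installing hooks is a matter of two static calls; `LogAllocHook`/`LogFreeHook` below are illustrative placeholders:

```cpp
void LogAllocHook(void* address, size_t size, const char* type_name) {
  // Record the allocation somewhere; must not re-enter the allocator.
}
void LogFreeHook(void* address) {}

void InstallPartitionHooks() {
  PartitionAllocHooks::SetAllocationHook(&LogAllocHook);
  PartitionAllocHooks::SetFreeHook(&LogFreeHook);
}
```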
| 487 | 488 |
| 488 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( | 489 ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask( |
| 489 PartitionFreelistEntry* ptr) { | 490 PartitionFreelistEntry* ptr) { |
| 490 // We use bswap on little endian as a fast mask for two reasons: | 491 // We use bswap on little endian as a fast mask for two reasons: |
| 491 // 1) If an object is freed and its vtable used where the attacker doesn't | 492 // 1) If an object is freed and its vtable used where the attacker doesn't |
| 492 // get the chance to run allocations between the free and use, the vtable | 493 // get the chance to run allocations between the free and use, the vtable |
| 493 // dereference is likely to fault. | 494 // dereference is likely to fault. |
| 494 // 2) If the attacker has a linear buffer overflow and elects to try and | 495 // 2) If the attacker has a linear buffer overflow and elects to try and |
| 495 // corrupt a freelist pointer, partial pointer overwrite attacks are | 496 // corrupt a freelist pointer, partial pointer overwrite attacks are |
| 496 // thwarted. | 497 // thwarted. |
| 497 // For big endian, similar guarantees are arrived at with a negation. | 498 // For big endian, similar guarantees are arrived at with a negation. |
| 498 #if defined(ARCH_CPU_BIG_ENDIAN) | 499 #if defined(ARCH_CPU_BIG_ENDIAN) |
| 499 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); | 500 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); |
| 500 #else | 501 #else |
| 501 uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr)); | 502 uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr)); |
| 502 #endif | 503 #endif |
| 503 return reinterpret_cast<PartitionFreelistEntry*>(masked); | 504 return reinterpret_cast<PartitionFreelistEntry*>(masked); |
| 504 } | 505 } |
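Note that both the byte swap and the negation are involutions, so the same helper both encodes and decodes freelist entries (see its paired uses in `PartitionBucketAlloc()` and `PartitionFreeWithPage()` below):

```cpp
// Illustration: masking twice round-trips to the original pointer.
ALWAYS_INLINE void CheckFreelistMaskRoundTrip(PartitionFreelistEntry* entry) {
  DCHECK(PartitionFreelistMask(PartitionFreelistMask(entry)) == entry);
}
```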
| 505 | 506 |
| 506 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { | 507 ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) { |
| 507 #if DCHECK_IS_ON() | 508 #if DCHECK_IS_ON() |
| 508 // Add space for cookies, checking for integer overflow. | 509 // Add space for cookies, checking for integer overflow. TODO(palmer): |
| 510 // Investigate the performance and code size implications of using |
| 511 // CheckedNumeric throughout PA. |
| 509 DCHECK(size + (2 * kCookieSize) > size); | 512 DCHECK(size + (2 * kCookieSize) > size); |
| 510 size += 2 * kCookieSize; | 513 size += 2 * kCookieSize; |
| 511 #endif | 514 #endif |
| 512 return size; | 515 return size; |
| 513 } | 516 } |
| 514 | 517 |
| 515 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { | 518 ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) { |
| 516 #if DCHECK_IS_ON() | 519 #if DCHECK_IS_ON() |
| 517 // Remove space for cookies. | 520 // Remove space for cookies. |
| 518 DCHECK(size >= 2 * kCookieSize); | 521 DCHECK(size >= 2 * kCookieSize); |
| 519 size -= 2 * kCookieSize; | 522 size -= 2 * kCookieSize; |
| 520 #endif | 523 #endif |
| 521 return size; | 524 return size; |
| 522 } | 525 } |
| 523 | 526 |
| 524 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { | 527 ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) { |
| 525 #if DCHECK_IS_ON() | 528 #if DCHECK_IS_ON() |
| 526 // The value given to the application is actually just after the cookie. | 529 // The value given to the application is actually just after the cookie. |
| 527 ptr = static_cast<char*>(ptr) - kCookieSize; | 530 ptr = static_cast<char*>(ptr) - kCookieSize; |
| 528 #endif | 531 #endif |
| 529 return ptr; | 532 return ptr; |
| 530 } | 533 } |
| 531 | 534 |
| 532 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { | 535 ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) { |
| 533 #if DCHECK_IS_ON() | 536 #if DCHECK_IS_ON() |
| 534 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 537 unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr); |
| 535 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 538 for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) |
| 536 *cookiePtr = kCookieValue[i]; | 539 *cookie_ptr = kCookieValue[i]; |
| 537 #endif | 540 #endif |
| 538 } | 541 } |
| 539 | 542 |
| 540 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { | 543 ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) { |
| 541 #if DCHECK_IS_ON() | 544 #if DCHECK_IS_ON() |
| 542 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 545 unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr); |
| 543 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 546 for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) |
| 544 DCHECK(*cookiePtr == kCookieValue[i]); | 547 DCHECK(*cookie_ptr == kCookieValue[i]); |
| 545 #endif | 548 #endif |
| 546 } | 549 } |
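Taken together, in DCHECK builds these helpers produce the following slot layout (a sketch):

```cpp
//   |<- kCookieSize ->|<- application data ->|<- kCookieSize ->|
//   ^ slot start       ^ pointer returned by PartitionAlloc*()
//
// PartitionCookieSizeAdjustAdd() grows the request to cover both cookies;
// PartitionCookieFreePointerAdjust() maps the application pointer back to
// the slot start on free; the write/check helpers fill and verify them.
```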
| 547 | 550 |
| 548 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { | 551 ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) { |
| 549 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 552 uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr); |
| 550 DCHECK(!(pointerAsUint & kSuperPageOffsetMask)); | 553 DCHECK(!(pointer_as_uint & kSuperPageOffsetMask)); |
| 551 // The metadata area is exactly one system page (the guard page) into the | 554 // The metadata area is exactly one system page (the guard page) into the |
| 552 // super page. | 555 // super page. |
| 553 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); | 556 return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize); |
| 554 } | 557 } |
| 555 | 558 |
| 556 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { | 559 ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) { |
| 557 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 560 uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr); |
| 558 char* superPagePtr = | 561 char* super_page_ptr = |
| 559 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); | 562 reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask); |
| 560 uintptr_t partitionPageIndex = | 563 uintptr_t partition_page_index = |
| 561 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; | 564 (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift; |
| 562 // Index 0 is invalid because it is the metadata and guard area and | 565 // Index 0 is invalid because it is the metadata and guard area and |
| 563 // the last index is invalid because it is a guard page. | 566 // the last index is invalid because it is a guard page. |
| 564 DCHECK(partitionPageIndex); | 567 DCHECK(partition_page_index); |
| 565 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 568 DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); |
| 566 PartitionPage* page = reinterpret_cast<PartitionPage*>( | 569 PartitionPage* page = reinterpret_cast<PartitionPage*>( |
| 567 partitionSuperPageToMetadataArea(superPagePtr) + | 570 PartitionSuperPageToMetadataArea(super_page_ptr) + |
| 568 (partitionPageIndex << kPageMetadataShift)); | 571 (partition_page_index << kPageMetadataShift)); |
| 569 // Partition pages in the same slot span can share the same page object. | 572 // Partition pages in the same slot span can share the same page object. |
| 570 // Adjust for that. | 573 // Adjust for that. |
| 571 size_t delta = page->pageOffset << kPageMetadataShift; | 574 size_t delta = page->page_offset << kPageMetadataShift; |
| 572 page = | 575 page = |
| 573 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); | 576 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); |
| 574 return page; | 577 return page; |
| 575 } | 578 } |
| 576 | 579 |
| 577 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { | 580 ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) { |
| 578 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); | 581 uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page); |
| 579 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); | 582 uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask); |
| 580 DCHECK(superPageOffset > kSystemPageSize); | 583 DCHECK(super_page_offset > kSystemPageSize); |
| 581 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * | 584 DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * |
| 582 kPageMetadataSize)); | 585 kPageMetadataSize)); |
| 583 uintptr_t partitionPageIndex = | 586 uintptr_t partition_page_index = |
| 584 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; | 587 (super_page_offset - kSystemPageSize) >> kPageMetadataShift; |
| 585 // Index 0 is invalid because it is the metadata area and the last index is | 588 // Index 0 is invalid because it is the metadata area and the last index is |
| 586 // invalid because it is a guard page. | 589 // invalid because it is a guard page. |
| 587 DCHECK(partitionPageIndex); | 590 DCHECK(partition_page_index); |
| 588 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 591 DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); |
| 589 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); | 592 uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask); |
| 590 void* ret = reinterpret_cast<void*>( | 593 void* ret = reinterpret_cast<void*>( |
| 591 superPageBase + (partitionPageIndex << kPartitionPageShift)); | 594 super_page_base + (partition_page_index << kPartitionPageShift)); |
| 592 return ret; | 595 return ret; |
| 593 } | 596 } |
| 594 | 597 |
| 595 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { | 598 ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) { |
| 596 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); | 599 PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr); |
| 597 // Checks that the pointer is a multiple of bucket size. | 600 // Checks that the pointer is a multiple of bucket size. |
| 598 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - | 601 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - |
| 599 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % | 602 reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) % |
| 600 page->bucket->slotSize)); | 603 page->bucket->slot_size)); |
| 601 return page; | 604 return page; |
| 602 } | 605 } |
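A worked example of the mapping, assuming the usual values for the constants whose definitions fall in the elided part of this header (kSystemPageSize == 4096, kPageMetadataShift == 5):

```cpp
// For a pointer 0x5000 bytes into a super page:
//   partition_page_index = 0x5000 >> kPartitionPageShift  // == 1
//   metadata entry       = super_page_base + 4096 + (1 << 5)
// PartitionPageToPointer() inverts this: metadata index 1 maps back to
// super-page offset 1 << kPartitionPageShift == 0x4000.
```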
| 603 | 606 |
| 604 ALWAYS_INLINE bool partitionBucketIsDirectMapped( | 607 ALWAYS_INLINE bool PartitionBucketIsDirectMapped( |
| 605 const PartitionBucket* bucket) { | 608 const PartitionBucket* bucket) { |
| 606 return !bucket->numSystemPagesPerSlotSpan; | 609 return !bucket->num_system_pages_per_slot_span; |
| 607 } | 610 } |
| 608 | 611 |
| 609 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { | 612 ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) { |
| 610 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; | 613 return bucket->num_system_pages_per_slot_span * kSystemPageSize; |
| 611 } | 614 } |
| 612 | 615 |
| 613 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { | 616 ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) { |
| 614 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); | 617 return static_cast<uint16_t>(PartitionBucketBytes(bucket) / |
| 615 } | 618 bucket->slot_size); |
| 616 | 619 } |
| 617 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { | 620 |
| 621 ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) { |
| 618 // For single-slot buckets which span more than one partition page, we | 622 // For single-slot buckets which span more than one partition page, we |
| 619 // have some spare metadata space to store the raw allocation size. We | 623 // have some spare metadata space to store the raw allocation size. We |
| 620 // can use this to report better statistics. | 624 // can use this to report better statistics. |
| 621 PartitionBucket* bucket = page->bucket; | 625 PartitionBucket* bucket = page->bucket; |
| 622 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | 626 if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) |
| 623 return nullptr; | 627 return nullptr; |
| 624 | 628 |
| 625 DCHECK((bucket->slotSize % kSystemPageSize) == 0); | 629 DCHECK((bucket->slot_size % kSystemPageSize) == 0); |
| 626 DCHECK(partitionBucketIsDirectMapped(bucket) || | 630 DCHECK(PartitionBucketIsDirectMapped(bucket) || |
| 627 partitionBucketSlots(bucket) == 1); | 631 PartitionBucketSlots(bucket) == 1); |
| 628 page++; | 632 page++; |
| 629 return reinterpret_cast<size_t*>(&page->freelistHead); | 633 return reinterpret_cast<size_t*>(&page->freelist_head); |
| 630 } | 634 } |
| 631 | 635 |
| 632 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { | 636 ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) { |
| 633 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 637 size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page); |
| 634 if (UNLIKELY(rawSizePtr != nullptr)) | 638 if (UNLIKELY(raw_size_ptr != nullptr)) |
| 635 return *rawSizePtr; | 639 return *raw_size_ptr; |
| 636 return 0; | 640 return 0; |
| 637 } | 641 } |
| 638 | 642 |
| 639 ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) { | 643 ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) { |
| 640 PartitionSuperPageExtentEntry* extentEntry = | 644 PartitionSuperPageExtentEntry* extent_entry = |
| 641 reinterpret_cast<PartitionSuperPageExtentEntry*>( | 645 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 642 reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask); | 646 reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask); |
| 643 return extentEntry->root; | 647 return extent_entry->root; |
| 644 } | 648 } |
| 645 | 649 |
| 646 ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) { | 650 ALWAYS_INLINE bool PartitionPointerIsValid(void* ptr) { |
| 647 PartitionPage* page = partitionPointerToPage(ptr); | 651 PartitionPage* page = PartitionPointerToPage(ptr); |
| 648 PartitionRootBase* root = partitionPageToRoot(page); | 652 PartitionRootBase* root = PartitionPageToRoot(page); |
| 649 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); | 653 return root->inverted_self == ~reinterpret_cast<uintptr_t>(root); |
| 650 } | 654 } |
| 651 | 655 |
| 652 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, | 656 ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root, |
| 653 int flags, | 657 int flags, |
| 654 size_t size, | 658 size_t size, |
| 655 PartitionBucket* bucket) { | 659 PartitionBucket* bucket) { |
| 656 PartitionPage* page = bucket->activePagesHead; | 660 PartitionPage* page = bucket->active_pages_head; |
| 657 // Check that this page is neither full nor freed. | 661 // Check that this page is neither full nor freed. |
| 658 DCHECK(page->numAllocatedSlots >= 0); | 662 DCHECK(page->num_allocated_slots >= 0); |
| 659 void* ret = page->freelistHead; | 663 void* ret = page->freelist_head; |
| 660 if (LIKELY(ret != 0)) { | 664 if (LIKELY(ret != 0)) { |
| 661 // If these asserts fire, you probably corrupted memory. | 665 // If these asserts fire, you probably corrupted memory. |
| 662 DCHECK(partitionPointerIsValid(ret)); | 666 DCHECK(PartitionPointerIsValid(ret)); |
| 663 // All large allocations must go through the slow path to correctly | 667 // All large allocations must go through the slow path to correctly |
| 664 // update the size metadata. | 668 // update the size metadata. |
| 665 DCHECK(partitionPageGetRawSize(page) == 0); | 669 DCHECK(PartitionPageGetRawSize(page) == 0); |
| 666 PartitionFreelistEntry* newHead = | 670 PartitionFreelistEntry* new_head = |
| 667 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); | 671 PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); |
| 668 page->freelistHead = newHead; | 672 page->freelist_head = new_head; |
| 669 page->numAllocatedSlots++; | 673 page->num_allocated_slots++; |
| 670 } else { | 674 } else { |
| 671 ret = partitionAllocSlowPath(root, flags, size, bucket); | 675 ret = PartitionAllocSlowPath(root, flags, size, bucket); |
| 672 DCHECK(!ret || partitionPointerIsValid(ret)); | 676 DCHECK(!ret || PartitionPointerIsValid(ret)); |
| 673 } | 677 } |
| 674 #if DCHECK_IS_ON() | 678 #if DCHECK_IS_ON() |
| 675 if (!ret) | 679 if (!ret) |
| 676 return 0; | 680 return 0; |
| 677 // Fill the uninitialized pattern, and write the cookies. | 681 // Fill the uninitialized pattern, and write the cookies. |
| 678 page = partitionPointerToPage(ret); | 682 page = PartitionPointerToPage(ret); |
| 679 size_t slotSize = page->bucket->slotSize; | 683 size_t slot_size = page->bucket->slot_size; |
| 680 size_t rawSize = partitionPageGetRawSize(page); | 684 size_t raw_size = PartitionPageGetRawSize(page); |
| 681 if (rawSize) { | 685 if (raw_size) { |
| 682 DCHECK(rawSize == size); | 686 DCHECK(raw_size == size); |
| 683 slotSize = rawSize; | 687 slot_size = raw_size; |
| 684 } | 688 } |
| 685 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); | 689 size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size); |
| 686 char* charRet = static_cast<char*>(ret); | 690 char* char_ret = static_cast<char*>(ret); |
| 687 // The value given to the application is actually just after the cookie. | 691 // The value given to the application is actually just after the cookie. |
| 688 ret = charRet + kCookieSize; | 692 ret = char_ret + kCookieSize; |
| 689 memset(ret, kUninitializedByte, noCookieSize); | 693 memset(ret, kUninitializedByte, no_cookie_size); |
| 690 partitionCookieWriteValue(charRet); | 694 PartitionCookieWriteValue(char_ret); |
| 691 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); | 695 PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size); |
| 692 #endif | 696 #endif |
| 693 return ret; | 697 return ret; |
| 694 } | 698 } |
| 695 | 699 |
| 696 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, | 700 ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root, |
| 697 size_t size, | 701 size_t size, |
| 698 const char* typeName) { | 702 const char* type_name) { |
| 699 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 703 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 700 void* result = malloc(size); | 704 void* result = malloc(size); |
| 701 CHECK(result); | 705 CHECK(result); |
| 702 return result; | 706 return result; |
| 703 #else | 707 #else |
| 704 size_t requestedSize = size; | 708 size_t requested_size = size; |
| 705 size = partitionCookieSizeAdjustAdd(size); | 709 size = PartitionCookieSizeAdjustAdd(size); |
| 706 DCHECK(root->initialized); | 710 DCHECK(root->initialized); |
| 707 size_t index = size >> kBucketShift; | 711 size_t index = size >> kBucketShift; |
| 708 DCHECK(index < root->numBuckets); | 712 DCHECK(index < root->num_buckets); |
| 709 DCHECK(size == index << kBucketShift); | 713 DCHECK(size == index << kBucketShift); |
| 710 PartitionBucket* bucket = &root->buckets()[index]; | 714 PartitionBucket* bucket = &root->buckets()[index]; |
| 711 void* result = partitionBucketAlloc(root, 0, size, bucket); | 715 void* result = PartitionBucketAlloc(root, 0, size, bucket); |
| 712 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); | 716 PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size, |
| 717 type_name); |
| 713 return result; | 718 return result; |
| 714 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 719 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 715 } | 720 } |
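And a minimal sketch of the size-specific variant; the template parameter (the compile-time max allocation size from the caveats at the top) and the `init()`/`root()` accessors are assumptions about `SizeSpecificPartitionAllocator`, which is declared elsewhere:

```cpp
struct Node { Node* next; int value; };

static SizeSpecificPartitionAllocator<1024> g_node_allocator;

void NodeExample() {
  g_node_allocator.init();
  void* mem = PartitionAlloc(g_node_allocator.root(), sizeof(Node), "Node");
  PartitionFree(mem);
}
```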
| 716 | 721 |
| 717 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { | 722 ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) { |
| 718 // If these asserts fire, you probably corrupted memory. | 723 // If these asserts fire, you probably corrupted memory. |
| 719 #if DCHECK_IS_ON() | 724 #if DCHECK_IS_ON() |
| 720 size_t slotSize = page->bucket->slotSize; | 725 size_t slot_size = page->bucket->slot_size; |
| 721 size_t rawSize = partitionPageGetRawSize(page); | 726 size_t raw_size = PartitionPageGetRawSize(page); |
| 722 if (rawSize) | 727 if (raw_size) |
| 723 slotSize = rawSize; | 728 slot_size = raw_size; |
| 724 partitionCookieCheckValue(ptr); | 729 PartitionCookieCheckValue(ptr); |
| 725 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - | 730 PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size - |
| 726 kCookieSize); | 731 kCookieSize); |
| 727 memset(ptr, kFreedByte, slotSize); | 732 memset(ptr, kFreedByte, slot_size); |
| 728 #endif | 733 #endif |
| 729 DCHECK(page->numAllocatedSlots); | 734 DCHECK(page->num_allocated_slots); |
| 730 PartitionFreelistEntry* freelistHead = page->freelistHead; | 735 PartitionFreelistEntry* freelist_head = page->freelist_head; |
| 731 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead)); | 736 DCHECK(!freelist_head || PartitionPointerIsValid(freelist_head)); |
| 732 CHECK(ptr != freelistHead); // Catches an immediate double free. | 737 CHECK(ptr != freelist_head); // Catches an immediate double free. |
| 733 // Look for double free one level deeper in debug. | 738 // Look for double free one level deeper in debug. |
| 734 DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); | 739 DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next)); |
| 735 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); | 740 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); |
| 736 entry->next = partitionFreelistMask(freelistHead); | 741 entry->next = PartitionFreelistMask(freelist_head); |
| 737 page->freelistHead = entry; | 742 page->freelist_head = entry; |
| 738 --page->numAllocatedSlots; | 743 --page->num_allocated_slots; |
| 739 if (UNLIKELY(page->numAllocatedSlots <= 0)) { | 744 if (UNLIKELY(page->num_allocated_slots <= 0)) { |
| 740 partitionFreeSlowPath(page); | 745 PartitionFreeSlowPath(page); |
| 741 } else { | 746 } else { |
| 742 // All single-slot allocations must go through the slow path to | 747 // All single-slot allocations must go through the slow path to |
| 743 // correctly update the size metadata. | 748 // correctly update the size metadata. |
| 744 DCHECK(partitionPageGetRawSize(page) == 0); | 749 DCHECK(PartitionPageGetRawSize(page) == 0); |
| 745 } | 750 } |
| 746 } | 751 } |
| 747 | 752 |
| 748 ALWAYS_INLINE void partitionFree(void* ptr) { | 753 ALWAYS_INLINE void PartitionFree(void* ptr) { |
| 749 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 754 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 750 free(ptr); | 755 free(ptr); |
| 751 #else | 756 #else |
| 752 PartitionAllocHooks::freeHookIfEnabled(ptr); | 757 PartitionAllocHooks::FreeHookIfEnabled(ptr); |
| 753 ptr = partitionCookieFreePointerAdjust(ptr); | 758 ptr = PartitionCookieFreePointerAdjust(ptr); |
| 754 DCHECK(partitionPointerIsValid(ptr)); | 759 DCHECK(PartitionPointerIsValid(ptr)); |
| 755 PartitionPage* page = partitionPointerToPage(ptr); | 760 PartitionPage* page = PartitionPointerToPage(ptr); |
| 756 partitionFreeWithPage(ptr, page); | 761 PartitionFreeWithPage(ptr, page); |
| 757 #endif | 762 #endif |
| 758 } | 763 } |
| 759 | 764 |
| 760 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( | 765 ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket( |
| 761 PartitionRootGeneric* root, | 766 PartitionRootGeneric* root, |
| 762 size_t size) { | 767 size_t size) { |
| 763 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size); | 768 size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size); |
| 764 // The order index is simply the next few bits after the most significant bit. | 769 // The order index is simply the next few bits after the most significant bit. |
| 765 size_t orderIndex = (size >> root->orderIndexShifts[order]) & | 770 size_t order_index = (size >> root->order_index_shifts[order]) & |
| 766 (kGenericNumBucketsPerOrder - 1); | 771 (kGenericNumBucketsPerOrder - 1); |
| 767 // And if the remaining bits are non-zero we must bump the bucket up. | 772 // And if the remaining bits are non-zero we must bump the bucket up. |
| 768 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; | 773 size_t sub_order_index = size & root->order_sub_index_masks[order]; |
| 769 PartitionBucket* bucket = | 774 PartitionBucket* bucket = |
| 770 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + | 775 root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) + |
| 771 orderIndex + !!subOrderIndex]; | 776 order_index + !!sub_order_index]; |
| 772 DCHECK(!bucket->slotSize || bucket->slotSize >= size); | 777 DCHECK(!bucket->slot_size || bucket->slot_size >= size); |
| 773 DCHECK(!(bucket->slotSize % kGenericSmallestBucket)); | 778 DCHECK(!(bucket->slot_size % kGenericSmallestBucket)); |
| 774 return bucket; | 779 return bucket; |
| 775 } | 780 } |
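Concretely: |order| is the 1-based index of the request's most significant bit, the next kGenericNumBucketsPerOrderBits bits select one of the equally spaced buckets within that power-of-two order, and any nonzero remainder bumps the request to the next bucket. A standalone re-derivation of the same math, assuming 3 bits per order (8 buckets per order) and size >= 64; smaller orders use saturated shifts in the real lookup table and are skipped here:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kBucketsPerOrderBits = 3;  // Assumed to match the header.

    // Slot size the lookup above would select for |size| (size >= 64).
    size_t BucketSlotSize(size_t size) {
      size_t order = 64 - __builtin_clzll(size);        // 1-based MSB index.
      size_t shift = order - 1 - kBucketsPerOrderBits;  // Bits of remainder.
      size_t spacing = static_cast<size_t>(1) << shift; // Gap between buckets.
      size_t base = (size >> shift) << shift;           // Round down.
      return base == size ? base : base + spacing;      // Bump on remainder.
    }

    int main() {
      // 4416 has its MSB at 4096; buckets in that order are 512 apart, so
      // the request lands in the 4608 bucket.
      printf("%zu -> %zu\n", size_t{4416}, BucketSlotSize(4416));
    }

This is also why the DCHECKs hold for these orders: the chosen slot_size is never smaller than the request, and every bucket boundary is a multiple of the smallest bucket.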
| 776 | 781 |
| 777 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, | 782 ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, |
| 778 int flags, | 783 int flags, |
| 779 size_t size, | 784 size_t size, |
| 780 const char* typeName) { | 785 const char* type_name) { |
| 781 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 786 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 782 void* result = malloc(size); | 787 void* result = malloc(size); |
| 783 CHECK(result || flags & PartitionAllocReturnNull); | 788 CHECK(result || flags & PartitionAllocReturnNull); |
| 784 return result; | 789 return result; |
| 785 #else | 790 #else |
| 786 DCHECK(root->initialized); | 791 DCHECK(root->initialized); |
| 787 size_t requestedSize = size; | 792 size_t requested_size = size; |
| 788 size = partitionCookieSizeAdjustAdd(size); | 793 size = PartitionCookieSizeAdjustAdd(size); |
| 789 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 794 PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); |
| 790 void* ret = nullptr; | 795 void* ret = nullptr; |
| 791 { | 796 { |
| 792 subtle::SpinLock::Guard guard(root->lock); | 797 subtle::SpinLock::Guard guard(root->lock); |
| 793 ret = partitionBucketAlloc(root, flags, size, bucket); | 798 ret = PartitionBucketAlloc(root, flags, size, bucket); |
| 794 } | 799 } |
| 795 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); | 800 PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name); |
| 796 return ret; | 801 return ret; |
| 797 #endif | 802 #endif |
| 798 } | 803 } |
| 799 | 804 |
| 800 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, | 805 ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root, |
| 801 size_t size, | 806 size_t size, |
| 802 const char* typeName) { | 807 const char* type_name) { |
| 803 return partitionAllocGenericFlags(root, 0, size, typeName); | 808 return PartitionAllocGenericFlags(root, 0, size, type_name); |
| 804 } | 809 } |
| 805 | 810 |
| 806 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { | 811 ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { |
| 807 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 812 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 808 free(ptr); | 813 free(ptr); |
| 809 #else | 814 #else |
| 810 DCHECK(root->initialized); | 815 DCHECK(root->initialized); |
| 811 | 816 |
| 812 if (UNLIKELY(!ptr)) | 817 if (UNLIKELY(!ptr)) |
| 813 return; | 818 return; |
| 814 | 819 |
| 815 PartitionAllocHooks::freeHookIfEnabled(ptr); | 820 PartitionAllocHooks::FreeHookIfEnabled(ptr); |
| 816 ptr = partitionCookieFreePointerAdjust(ptr); | 821 ptr = PartitionCookieFreePointerAdjust(ptr); |
| 817 DCHECK(partitionPointerIsValid(ptr)); | 822 DCHECK(PartitionPointerIsValid(ptr)); |
| 818 PartitionPage* page = partitionPointerToPage(ptr); | 823 PartitionPage* page = PartitionPointerToPage(ptr); |
| 819 { | 824 { |
| 820 subtle::SpinLock::Guard guard(root->lock); | 825 subtle::SpinLock::Guard guard(root->lock); |
| 821 partitionFreeWithPage(ptr, page); | 826 PartitionFreeWithPage(ptr, page); |
| 822 } | 827 } |
| 823 #endif | 828 #endif |
| 824 } | 829 } |
| 825 | 830 |
| 826 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { | 831 ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) { |
| 827 // Caller must check that the size is not above the kGenericMaxDirectMapped | 832 // Caller must check that the size is not above the kGenericMaxDirectMapped |
| 828 // limit before calling. This also guards against integer overflow in the | 833 // limit before calling. This also guards against integer overflow in the |
| 829 // calculation here. | 834 // calculation here. |
| 830 DCHECK(size <= kGenericMaxDirectMapped); | 835 DCHECK(size <= kGenericMaxDirectMapped); |
| 831 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 836 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 832 } | 837 } |
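The rounding is standard align-up arithmetic on the system page size. A compile-time sketch with an assumed 4 KiB page (the real masks come from the platform constants defined earlier in the allocator):

    #include <cstddef>

    constexpr size_t kPageSize = 4096;             // Assumed.
    constexpr size_t kOffsetMask = kPageSize - 1;  // kSystemPageOffsetMask
    constexpr size_t kBaseMask = ~kOffsetMask;     // kSystemPageBaseMask

    constexpr size_t DirectMapSize(size_t size) {
      return (size + kOffsetMask) & kBaseMask;
    }

    static_assert(DirectMapSize(10000) == 12288, "rounds up to next page");
    static_assert(DirectMapSize(8192) == 8192, "aligned sizes unchanged");

The DCHECK above is what keeps |size + kSystemPageOffsetMask| from overflowing, since kGenericMaxDirectMapped is far below SIZE_MAX.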
| 833 | 838 |
| 834 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, | 839 ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root, |
| 835 size_t size) { | 840 size_t size) { |
| 836 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 841 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 837 return size; | 842 return size; |
| 838 #else | 843 #else |
| 839 DCHECK(root->initialized); | 844 DCHECK(root->initialized); |
| 840 size = partitionCookieSizeAdjustAdd(size); | 845 size = PartitionCookieSizeAdjustAdd(size); |
| 841 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 846 PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); |
| 842 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { | 847 if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) { |
| 843 size = bucket->slotSize; | 848 size = bucket->slot_size; |
| 844 } else if (size > kGenericMaxDirectMapped) { | 849 } else if (size > kGenericMaxDirectMapped) { |
| 845 // Too large to allocate => return the size unchanged. | 850 // Too large to allocate => return the size unchanged. |
| 846 } else { | 851 } else { |
| 847 DCHECK(bucket == &PartitionRootBase::gPagedBucket); | 852 DCHECK(bucket == &PartitionRootBase::gPagedBucket); |
| 848 size = partitionDirectMapSize(size); | 853 size = PartitionDirectMapSize(size); |
| 849 } | 854 } |
| 850 return partitionCookieSizeAdjustSubtract(size); | 855 return PartitionCookieSizeAdjustSubtract(size); |
| 851 #endif | 856 #endif |
| 852 } | 857 } |
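Callers can use this to make over-allocation visible: ask what a request would round up to, then size a buffer to the full slot it will occupy anyway. A hedged usage sketch:

    #include "base/allocator/partition_allocator/partition_alloc.h"

    // Grow straight to the slot size so no bucket capacity is wasted.
    void* GrowExactlyToSlot(base::PartitionRootGeneric* root, size_t needed) {
      size_t capacity = base::PartitionAllocActualSize(root, needed);
      return base::PartitionAllocGeneric(root, capacity, "buffer");
    }

Because |capacity| is itself a bucket boundary, the second lookup lands on the same slot size as the original request.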
| 853 | 858 |
| 854 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { | 859 ALWAYS_INLINE bool PartitionAllocSupportsGetSize() { |
| 855 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 860 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 856 return false; | 861 return false; |
| 857 #else | 862 #else |
| 858 return true; | 863 return true; |
| 859 #endif | 864 #endif |
| 860 } | 865 } |
| 861 | 866 |
| 862 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { | 867 ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) { |
| 863 // No need to lock here. Only 'ptr' being freed by another thread could | 868 // No need to lock here. Only |ptr| being freed by another thread could |
| 864 // cause trouble, and the caller is responsible for that not happening. | 869 // cause trouble, and the caller is responsible for that not happening. |
| 865 DCHECK(partitionAllocSupportsGetSize()); | 870 DCHECK(PartitionAllocSupportsGetSize()); |
| 866 ptr = partitionCookieFreePointerAdjust(ptr); | 871 ptr = PartitionCookieFreePointerAdjust(ptr); |
| 867 DCHECK(partitionPointerIsValid(ptr)); | 872 DCHECK(PartitionPointerIsValid(ptr)); |
| 868 PartitionPage* page = partitionPointerToPage(ptr); | 873 PartitionPage* page = PartitionPointerToPage(ptr); |
| 869 size_t size = page->bucket->slotSize; | 874 size_t size = page->bucket->slot_size; |
| 870 return partitionCookieSizeAdjustSubtract(size); | 875 return PartitionCookieSizeAdjustSubtract(size); |
| 871 } | 876 } |
| 872 | 877 |
| 873 // N (or more accurately, N - sizeof(void*)) represents the largest size in | 878 // N (or more accurately, N - sizeof(void*)) represents the largest size in |
| 874 // bytes that will be handled by a SizeSpecificPartitionAllocator. | 879 // bytes that will be handled by a SizeSpecificPartitionAllocator. |
| 875 // Attempts to partitionAlloc() more than this amount will fail. | 880 // Attempts to partitionAlloc() more than this amount will fail. |
| 876 template <size_t N> | 881 template <size_t N> |
| 877 class SizeSpecificPartitionAllocator { | 882 class SizeSpecificPartitionAllocator { |
| 878 public: | 883 public: |
| 879 static const size_t kMaxAllocation = N - kAllocationGranularity; | 884 static const size_t kMaxAllocation = N - kAllocationGranularity; |
| 880 static const size_t kNumBuckets = N / kAllocationGranularity; | 885 static const size_t kNumBuckets = N / kAllocationGranularity; |
| 881 void init() { | 886 void init() { |
| 882 partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); | 887 PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation); |
| 883 } | 888 } |
| 884 bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); } | 889 bool shutdown() { return PartitionAllocShutdown(&partition_root_); } |
| 885 ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; } | 890 ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; } |
| 886 | 891 |
| 887 private: | 892 private: |
| 888 PartitionRoot m_partitionRoot; | 893 PartitionRoot partition_root_; |
| 889 PartitionBucket m_actualBuckets[kNumBuckets]; | 894 PartitionBucket actual_buckets_[kNumBuckets]; |
| 890 }; | 895 }; |
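A usage sketch for the size-specific variant; partitionAlloc()'s exact signature is declared earlier in this header and is assumed here, and by contract the partition must only be used from a single thread:

    #include "base/allocator/partition_allocator/partition_alloc.h"

    // Slots up to 1024 - kAllocationGranularity bytes; larger requests fail.
    static base::SizeSpecificPartitionAllocator<1024> g_allocator;

    void Example() {
      g_allocator.init();
      // Signature assumed from the earlier part of this header.
      void* p = base::partitionAlloc(g_allocator.root(), 64, "Example");
      base::PartitionFree(p);
      g_allocator.shutdown();
    }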
| 891 | 896 |
| 892 class PartitionAllocatorGeneric { | 897 class PartitionAllocatorGeneric { |
| 893 public: | 898 public: |
| 894 void init() { partitionAllocGenericInit(&m_partitionRoot); } | 899 void init() { PartitionAllocGenericInit(&partition_root_); } |
| 895 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } | 900 bool shutdown() { return PartitionAllocGenericShutdown(&partition_root_); } |
| 896 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } | 901 ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; } |
| 897 | 902 |
| 898 private: | 903 private: |
| 899 PartitionRootGeneric m_partitionRoot; | 904 PartitionRootGeneric partition_root_; |
| 900 }; | 905 }; |
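And the generic, lock-protected variant, which accepts any size up to kGenericMaxDirectMapped; every call below appears in this header with these signatures:

    #include "base/allocator/partition_allocator/partition_alloc.h"

    static base::PartitionAllocatorGeneric g_generic;

    void Example() {
      g_generic.init();
      void* p = base::PartitionAllocGeneric(g_generic.root(), 100, "Example");
      size_t usable = base::PartitionAllocGetSize(p);  // Slot size, >= 100.
      base::PartitionFreeGeneric(g_generic.root(), p);
      g_generic.shutdown();
    }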
| 901 | 906 |
| 902 } // namespace base | 907 } // namespace base |
| 903 | 908 |
| 904 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H | 909 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |