Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/PartitionAlloc.h

Issue 1881983003: Move PartitionAlloc related things into wtf/allocator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #ifndef WTF_PartitionAlloc_h
32 #define WTF_PartitionAlloc_h
33
34 // DESCRIPTION
35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() /
36 // partitionFreeGeneric() are approximately analogous to malloc() and free().
37 //
38 // The main difference is that a PartitionRoot / PartitionRootGeneric object
39 // must be supplied to these functions, representing a specific "heap partition"
40 // that will be used to satisfy the allocation. Different partitions are
41 // guaranteed to exist in separate address spaces, including being separate from
42 // the main system heap. If the contained objects are all freed, physical memory
43 // is returned to the system but the address space remains reserved.
44 // See PartitionAlloc.md for other security properties PartitionAlloc provides.
45 //
46 // THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
47 // SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
48 // minimize the instruction count to the fullest extent possible, the
49 // PartitionRoot is really just a header adjacent to other data areas provided
50 // by the allocator class.
51 //
52 // The partitionAlloc() variant of the API has the following caveats:
53 // - Allocations and frees against a single partition must be single threaded.
54 // - Allocations must not exceed a max size, chosen at compile-time via a
55 // templated parameter to PartitionAllocator.
56 // - Allocation sizes must be aligned to the system pointer size.
57 // - Allocations are bucketed exactly according to size.
58 //
59 // And for partitionAllocGeneric():
60 // - Multi-threaded use against a single partition is ok; locking is handled.
61 // - Allocations of any arbitrary size can be handled (subject to a limit of
62 // INT_MAX bytes for security reasons).
63 // - Bucketing is by approximate size, for example an allocation of 4000 bytes
64 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
65 // keep worst-case waste to ~10%.
66 //
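For orientation, a minimal usage sketch of the generic variant described above (every name here is declared later in this header; the "ExampleType" tag and the 4000-byte size are illustrative only):

    #include "wtf/PartitionAlloc.h"

    static WTF::PartitionAllocatorGeneric exampleAllocator;

    void exampleGenericUsage()
    {
        exampleAllocator.init();
        // Per the bucketing note above, a 4000-byte request lands in a 4096-byte slot.
        void* p = WTF::partitionAllocGeneric(exampleAllocator.root(), 4000, "ExampleType");
        WTF::partitionFreeGeneric(exampleAllocator.root(), p);
        exampleAllocator.shutdown();
    }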
67 // The allocators are designed to be extremely fast, thanks to the following
68 // properties and design:
69 // - Just two single (reasonably predictable) branches in the hot / fast path for
70 // both allocating and (significantly) freeing.
71 // - A minimal number of operations in the hot / fast path, with the slow paths
72 // in separate functions, leading to the possibility of inlining.
73 // - Each partition page (which is usually multiple physical pages) has a
74 // metadata structure which allows fast mapping of free() address to an
75 // underlying bucket.
76 // - Supports a lock-free API for fast performance in single-threaded cases.
77 // - The freelist for a given bucket is split across a number of partition
78 // pages, enabling various simple tricks to try and minimize fragmentation.
79 // - Fine-grained bucket sizes leading to less waste and better packing.
80 //
81 // The following security properties could be investigated in the future:
82 // - Per-object bucketing (instead of per-size) is mostly available at the API,
83 // but not used yet.
84 // - No randomness of freelist entries or bucket position.
85 // - Better checking for wild pointers in free().
86 // - Better freelist masking function to guarantee fault on 32-bit.
87
88 #include "wtf/Assertions.h"
89 #include "wtf/BitwiseOperations.h"
90 #include "wtf/ByteSwap.h"
91 #include "wtf/CPU.h"
92 #include "wtf/PageAllocator.h"
93 #include "wtf/SpinLock.h"
94 #include "wtf/TypeTraits.h"
95
96 #include <limits.h>
97
98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
99 #include <stdlib.h>
100 #endif
101
102 #if ENABLE(ASSERT)
103 #include <string.h>
104 #endif
105
106 namespace WTF {
107
108 // Allocation granularity of sizeof(void*) bytes.
109 static const size_t kAllocationGranularity = sizeof(void*);
110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
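A compile-time sketch of what this means on a 64-bit build (where kAllocationGranularity == 8):

    static_assert(sizeof(void*) != 8 || kBucketShift == 3,
                  "8-byte granularity implies bucket index == size >> 3");
    // e.g. a granularity-aligned 24-byte request maps to bucket index
    // 24 >> 3 == 3, mirroring the index computation in partitionAlloc() below.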
112
113 // Underlying partition storage pages are a power-of-two size. It is typical
114 // for a partition page to be based on multiple system pages. Most references to
115 // "page" refer to partition pages.
116 // We also have the concept of "super pages" -- these are the underlying system
117 // allocations we make. Super pages contain multiple partition pages inside them
118 // and include space for a small amount of metadata per partition page.
119 // Inside super pages, we store "slot spans". A slot span is a contiguous range
120 // of one or more partition pages that stores allocations of the same size.
121 // Slot span sizes are adjusted depending on the allocation size, to make sure
122 // the packing does not lead to unused (wasted) space at the end of the last
123 // system page of the span. For our current max slot span size of 64k and other
124 // constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up
125 // against the end of a system page.
126 static const size_t kPartitionPageShift = 14; // 16KB
127 static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
128 static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
129 static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
130 static const size_t kMaxPartitionPagesPerSlotSpan = 4;
131
132 // To avoid fragmentation via never-used freelist entries, we hand out partition
133 // freelist sections gradually, in units of the dominant system page size.
134 // What we're actually doing is avoiding filling the full partition page (16 KB)
135 // with freelist pointers right away. Writing freelist pointers will fault and
136 // dirty a private page, which is very wasteful if we never actually store
137 // objects there.
138 static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;
139 static const size_t kMaxSystemPagesPerSlotSpan = kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
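Worked numbers, assuming the usual 4KB kSystemPageSize from wtf/PageAllocator.h (that constant is platform-defined, so this is only illustrative):

    static_assert(kPartitionPageSize == 16384, "partition pages are 16KB");
    // With 4KB system pages, kNumSystemPagesPerPartitionPage == 4 and the
    // largest slot span is 16 system pages == 64KB, matching the "64k"
    // mentioned in the slot span comment above.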
140
141 // We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
142 // These chunks are called "super pages". We do this so that we can store
143 // metadata in the first few pages of each 2MB aligned section. This leads to
144 // a very fast free(). We specifically choose 2MB because this virtual address
145 // block represents a full but single PTE allocation on ARM, ia32 and x64.
146 //
147 // The layout of the super page is as follows. The sizes below are the same
148 // for 32 bit and 64 bit.
149 //
150 // | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Slot span | Slot span | ... | Slot span | Guard page (4KB) |
151 //
152 // - Each slot span is a contiguous range of one or more PartitionPages.
153 // - The metadata page has the following format. Note that the PartitionPage
154 // that is not at the head of a slot span is "unused". In other words,
155 // the metadata for the slot span is stored only in the first PartitionPage
156 // of the slot span. Metadata accesses to other PartitionPages are
157 // redirected to the first PartitionPage.
158 //
159 // | SuperPageExtentEntry (32B) | PartitionPage of slot span 1 (32B, used) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 2 (32B, used) | PartitionPage of slot span 3 (32B, used) | ... | PartitionPage of slot span N (32B, unused) |
160 //
161 // A direct mapped page has a similar layout to fake it looking like a super page:
162 //
163 // | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Direct mapped object | Guard page (4KB) |
164 //
165 // - The metadata page has the following layout:
166 //
167 // | SuperPageExtentEntry (32B) | PartitionPage (32B) | PartitionBucket (32B) | PartitionDirectMapExtent (8B) |
168 static const size_t kSuperPageShift = 21; // 2MB
169 static const size_t kSuperPageSize = 1 << kSuperPageShift;
170 static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
171 static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
172 static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;
173
174 static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
175 static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
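Two consequences of these constants, checkable at compile time: a 2MB super page holds 128 partition pages, and their 32-byte metadata entries pack exactly into the single 4KB metadata page shown in the layout above:

    static_assert(kNumPartitionPagesPerSuperPage == 128, "2MB / 16KB == 128");
    static_assert(kNumPartitionPagesPerSuperPage * kPageMetadataSize == 4096,
                  "metadata for a full super page fits in one 4KB system page");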
176
177 // The following kGeneric* constants apply to the generic variants of the API.
178 // The "order" of an allocation is closely related to the power-of-two size of
179 // the allocation. More precisely, the order is the bit index of the
180 // most-significant-bit in the allocation size, where the bit numbering starts
181 // at index 1 for the least-significant-bit.
182 // In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
183 // covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
184 static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
185 static const size_t kGenericMaxBucketedOrder = 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
186 static const size_t kGenericNumBucketedOrders = (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
187 static const size_t kGenericNumBucketsPerOrderBits = 3; // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144, 160, ..., 240
188 static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits;
189 static const size_t kGenericNumBuckets = kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
190 static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1);
191 static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
192 static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
193 static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc().
194 static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
195 static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT;
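Plugging these in gives the concrete bucketed range (a compile-time sketch):

    static_assert(kGenericSmallestBucket == 8, "smallest bucketed size is 8 bytes");
    static_assert(kGenericMaxBucketSpacing == 65536, "top orders space buckets 64KB apart");
    static_assert(kGenericMaxBucketed == 983040,
                  "largest bucketed size is 960KB; anything bigger is direct mapped");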
196
197 // Constants for the memory reclaim logic.
198 static const size_t kMaxFreeableSpans = 16;
199
200 // If the total size in bytes of allocated but not committed pages exceeds this
201 // value (probably indicating an "out of virtual address space" crash),
202 // a special crash stack trace is generated at |partitionOutOfMemory|.
203 // This is to distinguish "out of virtual address space" from
204 // "out of physical memory" in crash reports.
205 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
206
207 #if ENABLE(ASSERT)
208 // These two byte values match tcmalloc.
209 static const unsigned char kUninitializedByte = 0xAB;
210 static const unsigned char kFreedByte = 0xCD;
211 static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel.
212 static const unsigned char kCookieValue[kCookieSize] = { 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E };
213 #endif
214
215 struct PartitionBucket;
216 struct PartitionRootBase;
217
218 struct PartitionFreelistEntry {
219 PartitionFreelistEntry* next;
220 };
221
222 // Some notes on page states. A page can be in one of four major states:
223 // 1) Active.
224 // 2) Full.
225 // 3) Empty.
226 // 4) Decommitted.
227 // An active page has available free slots. A full page has no free slots. An
228 // empty page has no free slots, and a decommitted page is an empty page that
229 // had its backing memory released back to the system.
230 // There are two linked lists tracking the pages. The "active page" list is an
231 // approximation of a list of active pages. It is an approximation because
232 // full, empty and decommitted pages may briefly be present in the list until
233 // we next do a scan over it.
234 // The "empty page" list is an accurate list of pages which are either empty
235 // or decommitted.
236 //
237 // The significant page transitions are:
238 // - free() will detect when a full page has a slot free()'d and immediately
239 // return the page to the head of the active list.
240 // - free() will detect when a page is fully emptied. It _may_ add it to the
241 // empty list or it _may_ leave it on the active list until a future list scan.
242 // - malloc() _may_ scan the active page list in order to fulfil the request.
243 // If it does this, full, empty and decommitted pages encountered will be
244 // booted out of the active list. If there are no suitable active pages found,
245 // an empty or decommitted page (if one exists) will be pulled from the empty
246 // list on to the active list.
247 struct PartitionPage {
248 PartitionFreelistEntry* freelistHead;
249 PartitionPage* nextPage;
250 PartitionBucket* bucket;
251 int16_t numAllocatedSlots; // Deliberately signed, 0 for empty or decommitted page, -n for full pages.
252 uint16_t numUnprovisionedSlots;
253 uint16_t pageOffset;
254 int16_t emptyCacheIndex; // -1 if not in the empty cache.
255 };
256
257 struct PartitionBucket {
258 PartitionPage* activePagesHead; // Accessed most in hot path => goes first.
259 PartitionPage* emptyPagesHead;
260 PartitionPage* decommittedPagesHead;
261 uint32_t slotSize;
262 unsigned numSystemPagesPerSlotSpan : 8;
263 unsigned numFullPages : 24;
264 };
265
266 // An "extent" is a span of consecutive superpages. We link to the partition's
267 // next extent (if there is one) at the very start of a superpage's metadata
268 // area.
269 struct PartitionSuperPageExtentEntry {
270 PartitionRootBase* root;
271 char* superPageBase;
272 char* superPagesEnd;
273 PartitionSuperPageExtentEntry* next;
274 };
275
276 struct PartitionDirectMapExtent {
277 PartitionDirectMapExtent* nextExtent;
278 PartitionDirectMapExtent* prevExtent;
279 PartitionBucket* bucket;
280 size_t mapSize; // Mapped size, not including guard pages and meta-data.
281 };
282
283 struct WTF_EXPORT PartitionRootBase {
284 size_t totalSizeOfCommittedPages;
285 size_t totalSizeOfSuperPages;
286 size_t totalSizeOfDirectMappedPages;
287 // Invariant: totalSizeOfCommittedPages <= totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
288 unsigned numBuckets;
289 unsigned maxAllocation;
290 bool initialized;
291 char* nextSuperPage;
292 char* nextPartitionPage;
293 char* nextPartitionPageEnd;
294 PartitionSuperPageExtentEntry* currentExtent;
295 PartitionSuperPageExtentEntry* firstExtent;
296 PartitionDirectMapExtent* directMapList;
297 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
298 int16_t globalEmptyPageRingIndex;
299 uintptr_t invertedSelf;
300
301 static SpinLock gInitializedLock;
302 static bool gInitialized;
303 // gSeedPage is used as a sentinel to indicate that there is no page
304 // in the active page list. We can use nullptr, but in that case we need
305 // to add a null-check branch to the hot allocation path. We want to avoid
306 // that.
307 static PartitionPage gSeedPage;
308 static PartitionBucket gPagedBucket;
309 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
310 static void (*gOomHandlingFunction)();
311 };
312
313 // Never instantiate a PartitionRoot directly; instead, use PartitionAlloc.
314 struct PartitionRoot : public PartitionRootBase {
315 // The PartitionAlloc templated class ensures the following is correct.
316 ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<PartitionBucket*>(this + 1); }
317 ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast<const PartitionBucket*>(this + 1); }
318 };
319
320 // Never instantiate a PartitionRootGeneric directly; instead, use PartitionAllocatorGeneric.
321 struct PartitionRootGeneric : public PartitionRootBase {
322 SpinLock lock;
323 // Some pre-computed constants.
324 size_t orderIndexShifts[kBitsPerSizet + 1];
325 size_t orderSubIndexMasks[kBitsPerSizet + 1];
326 // The bucket lookup table lets us map a size_t to a bucket quickly.
327 // The trailing +1 caters for the overflow case for very large allocation sizes.
328 // It is one flat array instead of a 2D array because in the 2D world, we'd
329 // need to index array[blah][max+1] which risks undefined behavior.
330 PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
331 PartitionBucket buckets[kGenericNumBuckets];
332 };
333
334 // Flags for partitionAllocGenericFlags.
335 enum PartitionAllocFlags {
336 PartitionAllocReturnNull = 1 << 0,
337 };
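A minimal sketch of using this flag through partitionAllocGenericFlags(), declared further down; with PartitionAllocReturnNull set, a failed allocation returns null instead of taking the out-of-memory crash path (the function and type name below are illustrative):

    void* exampleTryAlloc(WTF::PartitionRootGeneric* root, size_t size)
    {
        // Caller must handle nullptr on failure.
        return WTF::partitionAllocGenericFlags(root, WTF::PartitionAllocReturnNull, size, "ExampleTryAlloc");
    }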
338
339 // Struct used to retrieve total memory usage of a partition. Used by
340 // PartitionStatsDumper implementation.
341 struct PartitionMemoryStats {
342 size_t totalMmappedBytes; // Total bytes mmapped from the system.
343 size_t totalCommittedBytes; // Total size of committed pages.
344 size_t totalResidentBytes; // Total bytes provisioned by the partition.
345 size_t totalActiveBytes; // Total active bytes in the partition.
346 size_t totalDecommittableBytes; // Total bytes that could be decommitted.
347 size_t totalDiscardableBytes; // Total bytes that could be discarded.
348 };
349
350 // Struct used to retrieve memory statistics about a partition bucket. Used by
351 // PartitionStatsDumper implementation.
352 struct PartitionBucketMemoryStats {
353 bool isValid; // Used to check whether the stats are valid.
354 bool isDirectMap; // True if this is a direct mapping; size will not be unique.
355 uint32_t bucketSlotSize; // The size of the slot in bytes.
356 uint32_t allocatedPageSize; // Total size the partition page allocated from the system.
357 uint32_t activeBytes; // Total active bytes used in the bucket.
358 uint32_t residentBytes; // Total bytes provisioned in the bucket.
359 uint32_t decommittableBytes; // Total bytes that could be decommitted.
360 uint32_t discardableBytes; // Total bytes that could be discarded.
361 uint32_t numFullPages; // Number of pages with all slots allocated.
362 uint32_t numActivePages; // Number of pages that have at least one provisioned slot.
363 uint32_t numEmptyPages; // Number of pages that are empty but not decommitted.
364 uint32_t numDecommittedPages; // Number of pages that are empty and decommitted.
365 };
366
367 // Interface that is passed to partitionDumpStats and
368 // partitionDumpStatsGeneric for using the memory statistics.
369 class WTF_EXPORT PartitionStatsDumper {
370 public:
371 // Called to dump total memory used by partition, once per partition.
372 virtual void partitionDumpTotals(const char* partitionName, const PartitionMemoryStats*) = 0;
373
374 // Called to dump stats about buckets, for each bucket.
375 virtual void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats*) = 0;
376 };
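A minimal sketch of an implementation (only the overridden signatures come from this header; what gets done with the stats is up to the embedder):

    class ExampleStatsDumper final : public WTF::PartitionStatsDumper {
    public:
        void partitionDumpTotals(const char* partitionName, const WTF::PartitionMemoryStats* stats) override
        {
            // e.g. record stats->totalResidentBytes against partitionName.
        }
        void partitionsDumpBucketStats(const char* partitionName, const WTF::PartitionBucketMemoryStats* stats) override
        {
            if (!stats->isValid)
                return; // Skip buckets with no meaningful data.
            // e.g. record stats->bucketSlotSize and stats->activeBytes.
        }
    };

An instance would then be passed to partitionDumpStats() or partitionDumpStatsGeneric(), declared below.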
377
378 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
379 WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation);
380 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
381 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
382 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
383
384 enum PartitionPurgeFlags {
385 // Decommitting the ring list of empty pages is reasonably fast.
386 PartitionPurgeDecommitEmptyPages = 1 << 0,
387 // Discarding unused system pages is slower, because it involves walking all
388 // freelists in all active partition pages of all buckets >= system page
389 // size. It often frees a similar amount of memory to decommitting the empty
390 // pages, though.
391 PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
392 };
393
394 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
395 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
396
397 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*);
398 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
399 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, void*, size_t, const char* typeName);
400
401 WTF_EXPORT void partitionDumpStats(PartitionRoot*, const char* partitionName, bool isLightDump, PartitionStatsDumper*);
402 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, const char* partitionName, bool isLightDump, PartitionStatsDumper*);
403
404 class WTF_EXPORT PartitionAllocHooks {
405 public:
406 typedef void AllocationHook(void* address, size_t, const char* typeName);
407 typedef void FreeHook(void* address);
408
409 static void setAllocationHook(AllocationHook* hook) { m_allocationHook = hook; }
410 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }
411
412 static void allocationHookIfEnabled(void* address, size_t size, const char* typeName)
413 {
414 AllocationHook* allocationHook = m_allocationHook;
415 if (UNLIKELY(allocationHook != nullptr))
416 allocationHook(address, size, typeName);
417 }
418
419 static void freeHookIfEnabled(void* address)
420 {
421 FreeHook* freeHook = m_freeHook;
422 if (UNLIKELY(freeHook != nullptr))
423 freeHook(address);
424 }
425
426 static void reallocHookIfEnabled(void* oldAddress, void* newAddress, size_t size, const char* typeName)
427 {
428 // Report a reallocation as a free followed by an allocation.
429 AllocationHook* allocationHook = m_allocationHook;
430 FreeHook* freeHook = m_freeHook;
431 if (UNLIKELY(allocationHook && freeHook)) {
432 freeHook(oldAddress);
433 allocationHook(newAddress, size, typeName);
434 }
435 }
436
437 private:
438 // Pointers to hook functions that PartitionAlloc will call on allocation and
439 // free if the pointers are non-null.
440 static AllocationHook* m_allocationHook;
441 static FreeHook* m_freeHook;
442 };
443
444 // In official builds, do not include type info string literals to avoid
445 // bloating the binary.
446 #if defined(OFFICIAL_BUILD)
447 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr
448 #else
449 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>()
450 #endif
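A typical call-site sketch (ExampleNode is a hypothetical caller-side type):

    struct ExampleNode { int value; };

    void* allocateExampleNode(WTF::PartitionRootGeneric* root)
    {
        // Expands to a type-name string in non-official builds, nullptr otherwise.
        return WTF::partitionAllocGeneric(root, sizeof(ExampleNode),
                                          WTF_HEAP_PROFILER_TYPE_NAME(ExampleNode));
    }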
451
452 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr)
453 {
454 // We use bswap on little endian as a fast mask for two reasons:
455 // 1) If an object is freed and its vtable used where the attacker doesn't
456 // get the chance to run allocations between the free and use, the vtable
457 // dereference is likely to fault.
458 // 2) If the attacker has a linear buffer overflow and elects to try and
459 // corrupt a freelist pointer, partial pointer overwrite attacks are
460 // thwarted.
461 // For big endian, similar guarantees are arrived at with a negation.
462 #if CPU(BIG_ENDIAN)
463 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
464 #else
465 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
466 #endif
467 return reinterpret_cast<PartitionFreelistEntry*>(masked);
468 }
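Both the byte swap and the negation are involutions, so masking twice restores the original pointer; a small sketch of that property (the concrete address is illustrative for a little-endian x64 build):

    inline void exampleFreelistMaskRoundTrip(WTF::PartitionFreelistEntry* entry)
    {
        // Encode and decode use the same self-inverse function.
        ASSERT(WTF::partitionFreelistMask(WTF::partitionFreelistMask(entry)) == entry);
    }
    // e.g. 0x00007f1234567890 byte-swaps to 0x90785634127f0000, a non-canonical
    // x64 address that faults if dereferenced as a vtable or freelist pointer.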
469
470 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size)
471 {
472 #if ENABLE(ASSERT)
473 // Add space for cookies, checking for integer overflow.
474 ASSERT(size + (2 * kCookieSize) > size);
475 size += 2 * kCookieSize;
476 #endif
477 return size;
478 }
479
480 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size)
481 {
482 #if ENABLE(ASSERT)
483 // Remove space for cookies.
484 ASSERT(size >= 2 * kCookieSize);
485 size -= 2 * kCookieSize;
486 #endif
487 return size;
488 }
489
490 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr)
491 {
492 #if ENABLE(ASSERT)
493 // The value given to the application is actually just after the cookie.
494 ptr = static_cast<char*>(ptr) - kCookieSize;
495 #endif
496 return ptr;
497 }
498
499 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr)
500 {
501 #if ENABLE(ASSERT)
502 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
503 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
504 *cookiePtr = kCookieValue[i];
505 #endif
506 }
507
508 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr)
509 {
510 #if ENABLE(ASSERT)
511 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
512 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
513 ASSERT(*cookiePtr == kCookieValue[i]);
514 #endif
515 }
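Putting the four helpers together: in ASSERT-enabled builds, a slot for a request of n bytes is laid out as

    | cookie (16B) | caller data (n bytes) | cookie (16B) |

so partitionCookieSizeAdjustAdd(n) == n + 32, the pointer handed to the caller points just past the leading cookie, and free() can validate both cookies.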
516
517 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
518 {
519 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
520 ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
521 // The metadata area is exactly one system page (the guard page) into the
522 // super page.
523 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
524 }
525
526 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr)
527 {
528 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
529 char* superPagePtr = reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
530 uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
531 // Index 0 is invalid because it is the metadata and guard area and
532 // the last index is invalid because it is a guard page.
533 ASSERT(partitionPageIndex);
534 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
535 PartitionPage* page = reinterpret_cast<PartitionPage*>(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift));
536 // Partition pages in the same slot span can share the same page object. Adjust for that.
537 size_t delta = page->pageOffset << kPageMetadataShift;
538 page = reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
539 return page;
540 }
541
542 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page)
543 {
544 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
545 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
546 ASSERT(superPageOffset > kSystemPageSize);
547 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
548 uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
549 // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
550 ASSERT(partitionPageIndex);
551 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
552 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
553 void* ret = reinterpret_cast<void*>(superPageBase + (partitionPageIndex << kPartitionPageShift));
554 return ret;
555 }
556
557 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
558 {
559 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
560 // Checks that the pointer is a multiple of bucket size.
561 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize));
562 return page;
563 }
564
565 ALWAYS_INLINE bool partitionBucketIsDirectMapped(const PartitionBucket* bucket)
566 {
567 return !bucket->numSystemPagesPerSlotSpan;
568 }
569
570 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket)
571 {
572 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
573 }
574
575 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket)
576 {
577 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
578 }
579
580 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page)
581 {
582 // For single-slot buckets which span more than one partition page, we
583 // have some spare metadata space to store the raw allocation size. We
584 // can use this to report better statistics.
585 PartitionBucket* bucket = page->bucket;
586 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
587 return nullptr;
588
589 ASSERT((bucket->slotSize % kSystemPageSize) == 0);
590 ASSERT(partitionBucketIsDirectMapped(bucket) || partitionBucketSlots(bucket) == 1);
591 page++;
592 return reinterpret_cast<size_t*>(&page->freelistHead);
593 }
594
595 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page)
596 {
597 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
598 if (UNLIKELY(rawSizePtr != nullptr))
599 return *rawSizePtr;
600 return 0;
601 }
602
603 ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page)
604 {
605 PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
606 return extentEntry->root;
607 }
608
609 ALWAYS_INLINE bool partitionPointerIsValid(void* ptr)
610 {
611 PartitionPage* page = partitionPointerToPage(ptr);
612 PartitionRootBase* root = partitionPageToRoot(page);
613 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
614 }
615
616 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
617 {
618 PartitionPage* page = bucket->activePagesHead;
619 // Check that this page is neither full nor freed.
620 ASSERT(page->numAllocatedSlots >= 0);
621 void* ret = page->freelistHead;
622 if (LIKELY(ret != 0)) {
623 // If these asserts fire, you probably corrupted memory.
624 ASSERT(partitionPointerIsValid(ret));
625 // All large allocations must go through the slow path to correctly
626 // update the size metadata.
627 ASSERT(partitionPageGetRawSize(page) == 0);
628 PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
629 page->freelistHead = newHead;
630 page->numAllocatedSlots++;
631 } else {
632 ret = partitionAllocSlowPath(root, flags, size, bucket);
633 ASSERT(!ret || partitionPointerIsValid(ret));
634 }
635 #if ENABLE(ASSERT)
636 if (!ret)
637 return 0;
638 // Fill the uninitialized pattern, and write the cookies.
639 page = partitionPointerToPage(ret);
640 size_t slotSize = page->bucket->slotSize;
641 size_t rawSize = partitionPageGetRawSize(page);
642 if (rawSize) {
643 ASSERT(rawSize == size);
644 slotSize = rawSize;
645 }
646 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
647 char* charRet = static_cast<char*>(ret);
648 // The value given to the application is actually just after the cookie.
649 ret = charRet + kCookieSize;
650 memset(ret, kUninitializedByte, noCookieSize);
651 partitionCookieWriteValue(charRet);
652 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
653 #endif
654 return ret;
655 }
656
657 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size, const char* typeName)
658 {
659 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
660 void* result = malloc(size);
661 RELEASE_ASSERT(result);
662 return result;
663 #else
664 size_t requestedSize = size;
665 size = partitionCookieSizeAdjustAdd(size);
666 ASSERT(root->initialized);
667 size_t index = size >> kBucketShift;
668 ASSERT(index < root->numBuckets);
669 ASSERT(size == index << kBucketShift);
670 PartitionBucket* bucket = &root->buckets()[index];
671 void* result = partitionBucketAlloc(root, 0, size, bucket);
672 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
673 return result;
674 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
675 }
676
677 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page)
678 {
679 // If these asserts fire, you probably corrupted memory.
680 #if ENABLE(ASSERT)
681 size_t slotSize = page->bucket->slotSize;
682 size_t rawSize = partitionPageGetRawSize(page);
683 if (rawSize)
684 slotSize = rawSize;
685 partitionCookieCheckValue(ptr);
686 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - kCookieSize);
687 memset(ptr, kFreedByte, slotSize);
688 #endif
689 ASSERT(page->numAllocatedSlots);
690 PartitionFreelistEntry* freelistHead = page->freelistHead;
691 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
692 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free.
693 ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug.
694 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
695 entry->next = partitionFreelistMask(freelistHead);
696 page->freelistHead = entry;
697 --page->numAllocatedSlots;
698 if (UNLIKELY(page->numAllocatedSlots <= 0)) {
699 partitionFreeSlowPath(page);
700 } else {
701 // All single-slot allocations must go through the slow path to
702 // correctly update the size metadata.
703 ASSERT(partitionPageGetRawSize(page) == 0);
704 }
705 }
706
707 ALWAYS_INLINE void partitionFree(void* ptr)
708 {
709 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
710 free(ptr);
711 #else
712 PartitionAllocHooks::freeHookIfEnabled(ptr);
713 ptr = partitionCookieFreePointerAdjust(ptr);
714 ASSERT(partitionPointerIsValid(ptr));
715 PartitionPage* page = partitionPointerToPage(ptr);
716 partitionFreeWithPage(ptr, page);
717 #endif
718 }
719
720 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size)
721 {
722 size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
723 // The order index is simply the next few bits after the most significant bit.
724 size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1);
725 // And if the remaining bits are non-zero we must bump the bucket up.
726 size_t subOrderIndex = size & root->orderSubIndexMasks[order];
727 PartitionBucket* bucket = root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex];
728 ASSERT(!bucket->slotSize || bucket->slotSize >= size);
729 ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
730 return bucket;
731 }
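A worked example on a 64-bit build (kBitsPerSizet == 64), matching the 4000 -> 4096 example in the DESCRIPTION comment:

    // size == 4000: countLeadingZerosSizet(4000) == 52, so order == 12
    // (covering sizes 2048..4095, with buckets spaced 2048 / 8 == 256 apart).
    // orderIndex == (4000 >> 8) & 7 == 7, giving candidate bucket 3840;
    // subOrderIndex == 4000 & 0xFF == 160 is non-zero, so the lookup index is
    // bumped by one and 4000 lands in the 4096-byte bucket.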
732
733 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size, const char* typeName)
734 {
735 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
736 void* result = malloc(size);
737 RELEASE_ASSERT(result);
738 return result;
739 #else
740 ASSERT(root->initialized);
741 size_t requestedSize = size;
742 size = partitionCookieSizeAdjustAdd(size);
743 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
744 void* ret = nullptr;
745 {
746 SpinLock::Guard guard(root->lock);
747 // TODO(bashi): Remove the following RELEASE_ASSERT()s once we find the cause of
748 // http://crbug.com/514141
749 #if OS(ANDROID)
750 RELEASE_ASSERT(bucket >= &root->buckets[0] || bucket == &PartitionRootGeneric::gPagedBucket);
751 RELEASE_ASSERT(bucket <= &root->buckets[kGenericNumBuckets - 1] || bucket == &PartitionRootGeneric::gPagedBucket);
752 RELEASE_ASSERT(root->initialized);
753 #endif
754 ret = partitionBucketAlloc(root, flags, size, bucket);
755 }
756 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
757 return ret;
758 #endif
759 }
760
761 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t size, const char* typeName)
762 {
763 return partitionAllocGenericFlags(root, 0, size, typeName);
764 }
765
766 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr)
767 {
768 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
769 free(ptr);
770 #else
771 ASSERT(root->initialized);
772
773 if (UNLIKELY(!ptr))
774 return;
775
776 PartitionAllocHooks::freeHookIfEnabled(ptr);
777 ptr = partitionCookieFreePointerAdjust(ptr);
778 ASSERT(partitionPointerIsValid(ptr));
779 PartitionPage* page = partitionPointerToPage(ptr);
780 {
781 SpinLock::Guard guard(root->lock);
782 partitionFreeWithPage(ptr, page);
783 }
784 #endif
785 }
786
787 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size)
788 {
789 // Caller must check that the size is not above the kGenericMaxDirectMapped
790 // limit before calling. This also guards against integer overflow in the
791 // calculation here.
792 ASSERT(size <= kGenericMaxDirectMapped);
793 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
794 }
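For example, again assuming 4KB system pages, a 100000-byte request rounds up to (100000 + 4095) & ~4095 == 102400 bytes, i.e. 25 system pages.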
795
796 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size)
797 {
798 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
799 return size;
800 #else
801 ASSERT(root->initialized);
802 size = partitionCookieSizeAdjustAdd(size);
803 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
804 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
805 size = bucket->slotSize;
806 } else if (size > kGenericMaxDirectMapped) {
807 // Too large to allocate => return the size unchanged.
808 } else {
809 ASSERT(bucket == &PartitionRootBase::gPagedBucket);
810 size = partitionDirectMapSize(size);
811 }
812 return partitionCookieSizeAdjustSubtract(size);
813 #endif
814 }
815
816 ALWAYS_INLINE bool partitionAllocSupportsGetSize()
817 {
818 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
819 return false;
820 #else
821 return true;
822 #endif
823 }
824
825 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr)
826 {
827 // No need to lock here. Only 'ptr' being freed by another thread could
828 // cause trouble, and the caller is responsible for that not happening.
829 ASSERT(partitionAllocSupportsGetSize());
830 ptr = partitionCookieFreePointerAdjust(ptr);
831 ASSERT(partitionPointerIsValid(ptr));
832 PartitionPage* page = partitionPointerToPage(ptr);
833 size_t size = page->bucket->slotSize;
834 return partitionCookieSizeAdjustSubtract(size);
835 }
836
837 // N (or more accurately, N - sizeof(void*)) represents the largest size in
838 // bytes that will be handled by a SizeSpecificPartitionAllocator.
839 // Attempts to partitionAlloc() more than this amount will fail.
840 template <size_t N>
841 class SizeSpecificPartitionAllocator {
842 public:
843 static const size_t kMaxAllocation = N - kAllocationGranularity;
844 static const size_t kNumBuckets = N / kAllocationGranularity;
845 void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); }
846 bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
847 ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }
848 private:
849 PartitionRoot m_partitionRoot;
850 PartitionBucket m_actualBuckets[kNumBuckets];
851 };
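A minimal usage sketch (the 1024-byte partition size and "ExampleSlot" tag are illustrative); per the caveats at the top of this header, all calls must stay on a single thread and requested sizes must be pointer-size aligned:

    static WTF::SizeSpecificPartitionAllocator<1024> exampleSlotAllocator;

    void* allocateExampleSlot()
    {
        // kMaxAllocation for this instantiation is 1024 - kAllocationGranularity.
        return WTF::partitionAlloc(exampleSlotAllocator.root(), 64, "ExampleSlot");
    }

init() must have been called once beforehand; shutdown() tears the partition down and returns a bool status (see partitionAllocShutdown() above).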
852
853 class PartitionAllocatorGeneric {
854 public:
855 void init() { partitionAllocGenericInit(&m_partitionRoot); }
856 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
857 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
858 private:
859 PartitionRootGeneric m_partitionRoot;
860 };
861
862 } // namespace WTF
863
864 using WTF::SizeSpecificPartitionAllocator;
865 using WTF::PartitionAllocatorGeneric;
866 using WTF::PartitionRoot;
867 using WTF::partitionAllocInit;
868 using WTF::partitionAllocShutdown;
869 using WTF::partitionAlloc;
870 using WTF::partitionFree;
871 using WTF::partitionAllocGeneric;
872 using WTF::partitionFreeGeneric;
873 using WTF::partitionReallocGeneric;
874 using WTF::partitionAllocActualSize;
875 using WTF::partitionAllocSupportsGetSize;
876 using WTF::partitionAllocGetSize;
877
878 #endif // WTF_PartitionAlloc_h
