/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef WTF_PartitionAlloc_h
#define WTF_PartitionAlloc_h

// DESCRIPTION
// partitionAlloc() / partitionAllocGeneric() and partitionFree() /
// partitionFreeGeneric() are approximately analogous to malloc() and free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
// that will be used to satisfy the allocation. Different partitions are
// guaranteed to exist in separate address spaces, including being separate from
// the main system heap. If the contained objects are all freed, physical memory
// is returned to the system but the address space remains reserved.
// See PartitionAlloc.md for other security properties PartitionAlloc provides.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
// minimize the instruction count to the fullest extent possible, the
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
// The partitionAlloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
//   templated parameter to SizeSpecificPartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
// And for partitionAllocGeneric():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
//   INT_MAX bytes for security reasons).
// - Bucketing is by approximate size; for example, an allocation of 4000 bytes
//   might be placed into a 4096-byte bucket. Bucket sizes are chosen to try to
//   keep worst-case waste to ~10%.
//
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
// - Just two (reasonably predictable) branches in the hot / fast path for
//   both allocating and (significantly) freeing.
// - A minimal number of operations in the hot / fast path, with the slow paths
//   in separate functions, leading to the possibility of inlining.
// - Each partition page (which is usually multiple physical pages) has a
//   metadata structure which allows fast mapping of a free() address to an
//   underlying bucket.
// - Supports a lock-free API for fast performance in single-threaded cases.
// - The freelist for a given bucket is split across a number of partition
//   pages, enabling various simple tricks to try to minimize fragmentation.
// - Fine-grained bucket sizes leading to less waste and better packing.
//
// The following security properties could be investigated in the future:
// - Per-object bucketing (instead of per-size) is mostly available at the API
//   level, but not used yet.
// - No randomness of freelist entries or bucket position.
// - Better checking for wild pointers in free().
// - Better freelist masking function to guarantee fault on 32-bit.

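// Illustrative usage sketch of the generic API (the allocator object must
// outlive every allocation made from it; "ExampleType" is just a sample
// profiling name):
//
//   static PartitionAllocatorGeneric allocator;
//   allocator.init();
//   void* ptr = partitionAllocGeneric(allocator.root(), 4000, "ExampleType");
//   partitionFreeGeneric(allocator.root(), ptr);
//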
#include "wtf/Assertions.h"
#include "wtf/BitwiseOperations.h"
#include "wtf/ByteSwap.h"
#include "wtf/CPU.h"
#include "wtf/SpinLock.h"
#include "wtf/TypeTraits.h"
#include "wtf/allocator/PageAllocator.h"

#include <limits.h>

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif

#if ENABLE(ASSERT)
#include <string.h>
#endif

namespace WTF {

// Allocation granularity of sizeof(void*) bytes.
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;

// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. Most references to
// "page" refer to partition pages.
// We also have the concept of "super pages" -- these are the underlying system
// allocations we make. Super pages contain multiple partition pages inside them
// and include space for a small amount of metadata per partition page.
// Inside super pages, we store "slot spans". A slot span is a contiguous range
// of one or more partition pages that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64KB and
// other constant values, we pack _all_ partitionAllocGeneric() sizes perfectly
// up against the end of a system page.
static const size_t kPartitionPageShift = 14;  // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;

// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size.
// What we're actually doing is avoiding filling the full partition page (16KB)
// with freelist pointers right away. Writing freelist pointers will fault and
// dirty a private page, which is very wasteful if we never actually store
// objects there.
static const size_t kNumSystemPagesPerPartitionPage =
    kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan =
    kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;

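// For illustration, assuming the common 4KB system page size (i.e.
// kSystemPageSize == 4096 from PageAllocator.h), the constants above work out
// to 4 system pages per 16KB partition page, and at most 16 system pages
// (4 partition pages, 64KB) per slot span -- the max slot span size mentioned
// earlier.
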
// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
//
// The layout of the super page is as follows. The sizes below are the same
// for 32 bit and 64 bit.
//
//   | Guard page (4KB)    |
//   | Metadata page (4KB) |
//   | Guard pages (8KB)   |
//   | Slot span           |
//   | Slot span           |
//   | ...                 |
//   | Slot span           |
//   | Guard page (4KB)    |
//
// - Each slot span is a contiguous range of one or more PartitionPages.
// - The metadata page has the following format. Note that the PartitionPage
//   that is not at the head of a slot span is "unused". In other words,
//   the metadata for the slot span is stored only in the first PartitionPage
//   of the slot span. Metadata accesses to other PartitionPages are
//   redirected to the first PartitionPage.
//
//   | SuperPageExtentEntry (32B)                 |
//   | PartitionPage of slot span 1 (32B, used)   |
//   | PartitionPage of slot span 1 (32B, unused) |
//   | PartitionPage of slot span 1 (32B, unused) |
//   | PartitionPage of slot span 2 (32B, used)   |
//   | PartitionPage of slot span 3 (32B, used)   |
//   | ...                                        |
//   | PartitionPage of slot span N (32B, unused) |
//
// A direct mapped page has a similar layout to make it look like a super
// page:
//
//   | Guard page (4KB)     |
//   | Metadata page (4KB)  |
//   | Guard pages (8KB)    |
//   | Direct mapped object |
//   | Guard page (4KB)     |
//
// - The metadata page has the following layout:
//
//   | SuperPageExtentEntry (32B)    |
//   | PartitionPage (32B)           |
//   | PartitionBucket (32B)         |
//   | PartitionDirectMapExtent (8B) |
static const size_t kSuperPageShift = 21;  // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage =
    kSuperPageSize / kPartitionPageSize;

static const size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;

// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-two size of
// the allocation. More precisely, the order is the bit index of the
// most-significant bit in the allocation size, where bit numbering starts
// at index 1 for the least-significant bit.
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
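// As a worked example for the 4000-byte allocation mentioned in the
// DESCRIPTION: 4000 is 0b111110100000, whose most significant bit is at
// (1-based) index 12, so its order is 12. Order 12 covers sizes 2048->4095
// with eight buckets spaced 256 bytes apart (2048, 2304, ..., 3840); 4000
// exceeds the last of those, so it gets bumped up to the next bucket, 4096.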
static const size_t kGenericMinBucketedOrder = 4;  // 8 bytes.
// Largest bucketed order is 1 << (20 - 1); such buckets store 512KB up to
// almost 1MB.
static const size_t kGenericMaxBucketedOrder = 20;
static const size_t kGenericNumBucketedOrders =
    (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
// 160, ..., 240:
static const size_t kGenericNumBucketsPerOrderBits = 3;
static const size_t kGenericNumBucketsPerOrder =
    1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericNumBuckets =
    kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
static const size_t kGenericSmallestBucket =
    1 << (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing =
    1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed =
    (1 << (kGenericMaxBucketedOrder - 1)) +
    ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
// Limit when downsizing a direct mapping using realloc():
static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1;
static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT;

// Constants for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;

// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably indicating an "out of virtual address space" crash), a
// special crash stack trace is generated at |partitionOutOfMemory|. This is to
// distinguish "out of virtual address space" from "out of physical memory" in
// crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1GiB

#if ENABLE(ASSERT)
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
// 16 bytes handles alignment up to XMM instructions on Intel.
static const size_t kCookieSize = 16;
static const unsigned char kCookieValue[kCookieSize] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
#endif

struct PartitionBucket;
struct PartitionRootBase;

struct PartitionFreelistEntry {
  PartitionFreelistEntry* next;
};

// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no allocated slots, and a decommitted page is an empty page
// that had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
//   return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
//   empty list or it _may_ leave it on the active list until a future list
//   scan.
// - malloc() _may_ scan the active page list in order to fulfill the request.
//   If it does this, full, empty and decommitted pages encountered will be
//   booted out of the active list. If there are no suitable active pages
//   found, an empty or decommitted page (if one exists) will be pulled from
//   the empty list onto the active list.
struct PartitionPage {
  PartitionFreelistEntry* freelistHead;
  PartitionPage* nextPage;
  PartitionBucket* bucket;
  // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
  int16_t numAllocatedSlots;
  uint16_t numUnprovisionedSlots;
  uint16_t pageOffset;
  int16_t emptyCacheIndex;  // -1 if not in the empty cache.
};

struct PartitionBucket {
  PartitionPage* activePagesHead;  // Accessed most in hot path => goes first.
  PartitionPage* emptyPagesHead;
  PartitionPage* decommittedPagesHead;
  uint32_t slotSize;
  unsigned numSystemPagesPerSlotSpan : 8;
  unsigned numFullPages : 24;
};

// An "extent" is a span of consecutive super pages. We link to the partition's
// next extent (if there is one) at the very start of a super page's metadata
// area.
struct PartitionSuperPageExtentEntry {
  PartitionRootBase* root;
  char* superPageBase;
  char* superPagesEnd;
  PartitionSuperPageExtentEntry* next;
};

struct PartitionDirectMapExtent {
  PartitionDirectMapExtent* nextExtent;
  PartitionDirectMapExtent* prevExtent;
  PartitionBucket* bucket;
  size_t mapSize;  // Mapped size, not including guard pages and metadata.
};

struct WTF_EXPORT PartitionRootBase {
  size_t totalSizeOfCommittedPages;
  size_t totalSizeOfSuperPages;
  size_t totalSizeOfDirectMappedPages;
  // Invariant: totalSizeOfCommittedPages <=
  //     totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
  unsigned numBuckets;
  unsigned maxAllocation;
  bool initialized;
  char* nextSuperPage;
  char* nextPartitionPage;
  char* nextPartitionPageEnd;
  PartitionSuperPageExtentEntry* currentExtent;
  PartitionSuperPageExtentEntry* firstExtent;
  PartitionDirectMapExtent* directMapList;
  PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
  int16_t globalEmptyPageRingIndex;
  uintptr_t invertedSelf;

  static SpinLock gInitializedLock;
  static bool gInitialized;
  // gSeedPage is used as a sentinel to indicate that there is no page in the
  // active page list. We could use nullptr, but in that case we would need to
  // add a null-check branch to the hot allocation path. We want to avoid that.
  static PartitionPage gSeedPage;
  static PartitionBucket gPagedBucket;
  // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
  static void (*gOomHandlingFunction)();
};

// Never instantiate a PartitionRoot directly, instead use
// SizeSpecificPartitionAllocator.
struct PartitionRoot : public PartitionRootBase {
  // The SizeSpecificPartitionAllocator template ensures that the buckets are
  // laid out immediately after this header, making the casts below correct.
  ALWAYS_INLINE PartitionBucket* buckets() {
    return reinterpret_cast<PartitionBucket*>(this + 1);
  }
  ALWAYS_INLINE const PartitionBucket* buckets() const {
    return reinterpret_cast<const PartitionBucket*>(this + 1);
  }
};

// Never instantiate a PartitionRootGeneric directly, instead use
// PartitionAllocatorGeneric.
struct PartitionRootGeneric : public PartitionRootBase {
  SpinLock lock;
  // Some pre-computed constants.
  size_t orderIndexShifts[kBitsPerSizet + 1];
  size_t orderSubIndexMasks[kBitsPerSizet + 1];
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation
  // sizes. It is one flat array instead of a 2D array because in the 2D
  // world, we'd need to index array[blah][max+1] which risks undefined
  // behavior.
  PartitionBucket*
      bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
  PartitionBucket buckets[kGenericNumBuckets];
};

// Flags for partitionAllocGenericFlags.
enum PartitionAllocFlags {
  PartitionAllocReturnNull = 1 << 0,
};

// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
  size_t totalMmappedBytes;    // Total bytes mmapped from the system.
  size_t totalCommittedBytes;  // Total size of committed pages.
  size_t totalResidentBytes;   // Total bytes provisioned by the partition.
  size_t totalActiveBytes;     // Total active bytes in the partition.
  size_t totalDecommittableBytes;  // Total bytes that could be decommitted.
  size_t totalDiscardableBytes;    // Total bytes that could be discarded.
};

// Struct used to retrieve memory statistics about a partition bucket. Used by
// PartitionStatsDumper implementation.
struct PartitionBucketMemoryStats {
  bool isValid;      // Used to check whether the stats are valid.
  bool isDirectMap;  // True if this is a direct mapping; size will not be
                     // unique.
  uint32_t bucketSlotSize;      // The size of the slot in bytes.
  uint32_t allocatedPageSize;   // Total size the partition page allocated from
                                // the system.
  uint32_t activeBytes;         // Total active bytes used in the bucket.
  uint32_t residentBytes;       // Total bytes provisioned in the bucket.
  uint32_t decommittableBytes;  // Total bytes that could be decommitted.
  uint32_t discardableBytes;    // Total bytes that could be discarded.
  uint32_t numFullPages;        // Number of pages with all slots allocated.
  uint32_t numActivePages;      // Number of pages that have at least one
                                // provisioned slot.
  uint32_t numEmptyPages;       // Number of pages that are empty
                                // but not decommitted.
  uint32_t numDecommittedPages;  // Number of pages that are empty
                                 // and decommitted.
};

// Interface that is passed to partitionDumpStats and
// partitionDumpStatsGeneric for reporting the memory statistics.
class WTF_EXPORT PartitionStatsDumper {
 public:
  // Called to dump total memory used by a partition, once per partition.
  virtual void partitionDumpTotals(const char* partitionName,
                                   const PartitionMemoryStats*) = 0;

  // Called to dump stats about buckets, for each bucket.
  virtual void partitionsDumpBucketStats(const char* partitionName,
                                         const PartitionBucketMemoryStats*) = 0;
};

WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
WTF_EXPORT void partitionAllocInit(PartitionRoot*,
                                   size_t numBuckets,
                                   size_t maxAllocation);
WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);

enum PartitionPurgeFlags {
  // Decommitting the ring list of empty pages is reasonably fast.
  PartitionPurgeDecommitEmptyPages = 1 << 0,
  // Discarding unused system pages is slower, because it involves walking all
  // freelists in all active partition pages of all buckets >= system page
  // size. It often frees a similar amount of memory to decommitting the empty
  // pages, though.
  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};

WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);

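// Illustrative purge call, reusing the |allocator| from the usage sketch near
// the top of this file (the int parameter is a bitmask of
// PartitionPurgeFlags):
//
//   partitionPurgeMemoryGeneric(allocator.root(),
//                               PartitionPurgeDecommitEmptyPages |
//                                   PartitionPurgeDiscardUnusedSystemPages);
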
WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*,
                                                     int,
                                                     size_t,
                                                     PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*,
                                                      void*,
                                                      size_t,
                                                      const char* typeName);

WTF_EXPORT void partitionDumpStats(PartitionRoot*,
                                   const char* partitionName,
                                   bool isLightDump,
                                   PartitionStatsDumper*);
WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*,
                                          const char* partitionName,
                                          bool isLightDump,
                                          PartitionStatsDumper*);

class WTF_EXPORT PartitionAllocHooks {
 public:
  typedef void AllocationHook(void* address, size_t, const char* typeName);
  typedef void FreeHook(void* address);

  static void setAllocationHook(AllocationHook* hook) {
    m_allocationHook = hook;
  }
  static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }

  static void allocationHookIfEnabled(void* address,
                                      size_t size,
                                      const char* typeName) {
    AllocationHook* allocationHook = m_allocationHook;
    if (UNLIKELY(allocationHook != nullptr))
      allocationHook(address, size, typeName);
  }

  static void freeHookIfEnabled(void* address) {
    FreeHook* freeHook = m_freeHook;
    if (UNLIKELY(freeHook != nullptr))
      freeHook(address);
  }

  static void reallocHookIfEnabled(void* oldAddress,
                                   void* newAddress,
                                   size_t size,
                                   const char* typeName) {
    // Report a reallocation as a free followed by an allocation.
    AllocationHook* allocationHook = m_allocationHook;
    FreeHook* freeHook = m_freeHook;
    if (UNLIKELY(allocationHook && freeHook)) {
      freeHook(oldAddress);
      allocationHook(newAddress, size, typeName);
    }
  }

 private:
  // Pointers to hook functions that PartitionAlloc will call on allocation and
  // free if the pointers are non-null.
  static AllocationHook* m_allocationHook;
  static FreeHook* m_freeHook;
};

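// Illustrative hook installation sketch; logAllocation is a hypothetical
// client-provided function matching the AllocationHook typedef above:
//
//   static void logAllocation(void* address, size_t size,
//                             const char* typeName) {
//     // e.g. record (address, size, typeName) in a profiling buffer.
//   }
//   PartitionAllocHooks::setAllocationHook(&logAllocation);
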
// In official builds, do not include type info string literals to avoid
// bloating the binary.
#if defined(OFFICIAL_BUILD)
#define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr
#else
#define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>()
#endif

ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(
    PartitionFreelistEntry* ptr) {
  // We use bswap on little endian as a fast mask for two reasons:
  // 1) If an object is freed and its vtable is used before the attacker has a
  //    chance to run allocations between the free and the use, the vtable
  //    dereference is likely to fault.
  // 2) If the attacker has a linear buffer overflow and tries to corrupt a
  //    freelist pointer, partial pointer overwrite attacks are thwarted.
  // For big endian, similar guarantees are arrived at with a negation.
#if CPU(BIG_ENDIAN)
  uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
  uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
  return reinterpret_cast<PartitionFreelistEntry*>(masked);
}

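// For illustration, on a little-endian 64-bit build a freelist pointer such as
// 0x00007f0012345678 is stored as bswap(0x00007f0012345678) ==
// 0x78563412007f0000. A stale dereference, or a partial attacker overwrite of
// the stored value, yields an address of this shuffled form, which is very
// unlikely to be valid, mapped memory.
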
ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) {
#if ENABLE(ASSERT)
  // Add space for cookies, checking for integer overflow.
  ASSERT(size + (2 * kCookieSize) > size);
  size += 2 * kCookieSize;
#endif
  return size;
}

ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) {
#if ENABLE(ASSERT)
  // Remove space for cookies.
  ASSERT(size >= 2 * kCookieSize);
  size -= 2 * kCookieSize;
#endif
  return size;
}

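// In ENABLE(ASSERT) builds, each slot handed out is therefore laid out as:
//
//   | cookie (16B) | application data | cookie (16B) |
//
// with the application pointer starting just past the leading cookie.
// partitionBucketAlloc() below writes both cookies and fills the data with
// kUninitializedByte; partitionFreeWithPage() checks them on free.
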
ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) {
#if ENABLE(ASSERT)
  // The value given to the application is actually just after the cookie.
  ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
  return ptr;
}

ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) {
#if ENABLE(ASSERT)
  unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
    *cookiePtr = kCookieValue[i];
#endif
}

ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) {
#if ENABLE(ASSERT)
  unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
    ASSERT(*cookiePtr == kCookieValue[i]);
#endif
}

ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
  ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}

ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
  char* superPagePtr =
      reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
  uintptr_t partitionPageIndex =
      (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
  // Index 0 is invalid because it is the metadata and guard area and
  // the last index is invalid because it is a guard page.
  ASSERT(partitionPageIndex);
  ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
  PartitionPage* page = reinterpret_cast<PartitionPage*>(
      partitionSuperPageToMetadataArea(superPagePtr) +
      (partitionPageIndex << kPageMetadataShift));
  // Partition pages in the same slot span can share the same page object.
  // Adjust for that.
  size_t delta = page->pageOffset << kPageMetadataShift;
  page =
      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
  return page;
}

ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
  uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
  ASSERT(superPageOffset > kSystemPageSize);
  ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
                                              kPageMetadataSize));
  uintptr_t partitionPageIndex =
      (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
  // Index 0 is invalid because it is the metadata area and the last index is
  // invalid because it is a guard page.
  ASSERT(partitionPageIndex);
  ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
  uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
  void* ret = reinterpret_cast<void*>(
      superPageBase + (partitionPageIndex << kPartitionPageShift));
  return ret;
}

ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) {
  PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
  // Check that the pointer's offset within the slot span is a multiple of the
  // slot size.
  ASSERT(!((reinterpret_cast<uintptr_t>(ptr) -
            reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) %
           page->bucket->slotSize));
  return page;
}

ALWAYS_INLINE bool partitionBucketIsDirectMapped(
    const PartitionBucket* bucket) {
  return !bucket->numSystemPagesPerSlotSpan;
}

ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) {
  return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
}

ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) {
  return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
}

ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) {
  // For single-slot buckets which span more than one partition page, we
  // have some spare metadata space to store the raw allocation size. We
  // can use this to report better statistics.
  PartitionBucket* bucket = page->bucket;
  if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
    return nullptr;

  ASSERT((bucket->slotSize % kSystemPageSize) == 0);
  ASSERT(partitionBucketIsDirectMapped(bucket) ||
         partitionBucketSlots(bucket) == 1);
  // The raw size lives in the second, otherwise unused, PartitionPage
  // metadata entry of the span.
  page++;
  return reinterpret_cast<size_t*>(&page->freelistHead);
}

ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) {
  size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
  if (UNLIKELY(rawSizePtr != nullptr))
    return *rawSizePtr;
  return 0;
}

ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) {
  PartitionSuperPageExtentEntry* extentEntry =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
  return extentEntry->root;
}

ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) {
  PartitionPage* page = partitionPointerToPage(ptr);
  PartitionRootBase* root = partitionPageToRoot(page);
  return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
}

ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root,
                                         int flags,
                                         size_t size,
                                         PartitionBucket* bucket) {
  PartitionPage* page = bucket->activePagesHead;
  // Check that this page is neither full nor freed.
  ASSERT(page->numAllocatedSlots >= 0);
  void* ret = page->freelistHead;
  if (LIKELY(ret != 0)) {
    // If these asserts fire, you probably corrupted memory.
    ASSERT(partitionPointerIsValid(ret));
    // All large allocations must go through the slow path to correctly
    // update the size metadata.
    ASSERT(partitionPageGetRawSize(page) == 0);
    PartitionFreelistEntry* newHead =
        partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
    page->freelistHead = newHead;
    page->numAllocatedSlots++;
  } else {
    ret = partitionAllocSlowPath(root, flags, size, bucket);
    ASSERT(!ret || partitionPointerIsValid(ret));
  }
#if ENABLE(ASSERT)
  if (!ret)
    return 0;
  // Fill the uninitialized pattern, and write the cookies.
  page = partitionPointerToPage(ret);
  size_t slotSize = page->bucket->slotSize;
  size_t rawSize = partitionPageGetRawSize(page);
  if (rawSize) {
    ASSERT(rawSize == size);
    slotSize = rawSize;
  }
  size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
  char* charRet = static_cast<char*>(ret);
  // The value given to the application is actually just after the cookie.
  ret = charRet + kCookieSize;
  memset(ret, kUninitializedByte, noCookieSize);
  partitionCookieWriteValue(charRet);
  partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
#endif
  return ret;
}

ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root,
                                   size_t size,
                                   const char* typeName) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  void* result = malloc(size);
  RELEASE_ASSERT(result);
  return result;
#else
  size_t requestedSize = size;
  size = partitionCookieSizeAdjustAdd(size);
  ASSERT(root->initialized);
  size_t index = size >> kBucketShift;
  ASSERT(index < root->numBuckets);
  ASSERT(size == index << kBucketShift);
  PartitionBucket* bucket = &root->buckets()[index];
  void* result = partitionBucketAlloc(root, 0, size, bucket);
  PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
  return result;
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}

ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) {
  // If these asserts fire, you probably corrupted memory.
#if ENABLE(ASSERT)
  size_t slotSize = page->bucket->slotSize;
  size_t rawSize = partitionPageGetRawSize(page);
  if (rawSize)
    slotSize = rawSize;
  partitionCookieCheckValue(ptr);
  partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize -
                            kCookieSize);
  memset(ptr, kFreedByte, slotSize);
#endif
  ASSERT(page->numAllocatedSlots);
  PartitionFreelistEntry* freelistHead = page->freelistHead;
  ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
  SECURITY_CHECK(ptr != freelistHead);  // Catches an immediate double free.
  // Look for double free one level deeper in debug.
  SECURITY_DCHECK(!freelistHead ||
                  ptr != partitionFreelistMask(freelistHead->next));
  PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
  entry->next = partitionFreelistMask(freelistHead);
  page->freelistHead = entry;
  --page->numAllocatedSlots;
  if (UNLIKELY(page->numAllocatedSlots <= 0)) {
    partitionFreeSlowPath(page);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the size metadata.
    ASSERT(partitionPageGetRawSize(page) == 0);
  }
}

ALWAYS_INLINE void partitionFree(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  PartitionAllocHooks::freeHookIfEnabled(ptr);
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  partitionFreeWithPage(ptr, page);
#endif
}

ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
    PartitionRootGeneric* root,
    size_t size) {
  size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size);
  // The order index is simply the next few bits after the most significant
  // bit.
  size_t orderIndex = (size >> root->orderIndexShifts[order]) &
                      (kGenericNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
  size_t subOrderIndex = size & root->orderSubIndexMasks[order];
  PartitionBucket* bucket =
      root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
                          orderIndex + !!subOrderIndex];
  ASSERT(!bucket->slotSize || bucket->slotSize >= size);
  ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
  return bucket;
}

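// Continuing the worked example from the kGeneric* constants: for size == 4000
// (0b111110100000) on a 64-bit build, order == 64 - 52 == 12; the three bits
// after the most significant bit are 0b111, so orderIndex == 7; and the
// remaining low bits (0b10100000) are non-zero, so the lookup lands one
// bucket up, yielding the 4096-byte bucket.
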
ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root,
                                               int flags,
                                               size_t size,
                                               const char* typeName) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  void* result = malloc(size);
  RELEASE_ASSERT(result || flags & PartitionAllocReturnNull);
  return result;
#else
  ASSERT(root->initialized);
  size_t requestedSize = size;
  size = partitionCookieSizeAdjustAdd(size);
  PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
  void* ret = nullptr;
  {
    SpinLock::Guard guard(root->lock);
    ret = partitionBucketAlloc(root, flags, size, bucket);
  }
  PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
  return ret;
#endif
}

ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root,
                                          size_t size,
                                          const char* typeName) {
  return partitionAllocGenericFlags(root, 0, size, typeName);
}

ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  ASSERT(root->initialized);

  if (UNLIKELY(!ptr))
    return;

  PartitionAllocHooks::freeHookIfEnabled(ptr);
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  {
    SpinLock::Guard guard(root->lock);
    partitionFreeWithPage(ptr, page);
  }
#endif
}

ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) {
  // Caller must check that the size is not above the kGenericMaxDirectMapped
  // limit before calling. This also guards against integer overflow in the
  // calculation here.
  ASSERT(size <= kGenericMaxDirectMapped);
  return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}

ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root,
                                              size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return size;
#else
  ASSERT(root->initialized);
  size = partitionCookieSizeAdjustAdd(size);
  PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
  if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
    size = bucket->slotSize;
  } else if (size > kGenericMaxDirectMapped) {
    // Too large to allocate => return the size unchanged.
  } else {
    ASSERT(bucket == &PartitionRootBase::gPagedBucket);
    size = partitionDirectMapSize(size);
  }
  return partitionCookieSizeAdjustSubtract(size);
#endif
}

ALWAYS_INLINE bool partitionAllocSupportsGetSize() {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return false;
#else
  return true;
#endif
}

ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) {
  // No need to lock here. Only 'ptr' being freed by another thread could
  // cause trouble, and the caller is responsible for that not happening.
  ASSERT(partitionAllocSupportsGetSize());
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  size_t size = page->bucket->slotSize;
  return partitionCookieSizeAdjustSubtract(size);
}

// N (or more accurately, N - sizeof(void*)) represents the largest size in
// bytes that will be handled by a SizeSpecificPartitionAllocator.
// Attempts to partitionAlloc() more than this amount will fail.
template <size_t N>
class SizeSpecificPartitionAllocator {
 public:
  static const size_t kMaxAllocation = N - kAllocationGranularity;
  static const size_t kNumBuckets = N / kAllocationGranularity;
  void init() {
    partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation);
  }
  bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
  ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }

 private:
  PartitionRoot m_partitionRoot;
  PartitionBucket m_actualBuckets[kNumBuckets];
};

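// Illustrative sketch of the size-specific API; 1024 here is an arbitrary
// compile-time cap (the largest supported request is then
// 1024 - kAllocationGranularity bytes):
//
//   static SizeSpecificPartitionAllocator<1024> allocator;
//   allocator.init();
//   void* ptr = partitionAlloc(allocator.root(), 64, "ExampleType");
//   partitionFree(ptr);
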
class PartitionAllocatorGeneric {
 public:
  void init() { partitionAllocGenericInit(&m_partitionRoot); }
  bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
  ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }

 private:
  PartitionRootGeneric m_partitionRoot;
};

}  // namespace WTF

using WTF::SizeSpecificPartitionAllocator;
using WTF::PartitionAllocatorGeneric;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;
using WTF::partitionAllocActualSize;
using WTF::partitionAllocSupportsGetSize;
using WTF::partitionAllocGetSize;

#endif  // WTF_PartitionAlloc_h