Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/PartitionAlloc.h

Issue 1611343002: wtf reformat test
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: pydent Created 4 years, 11 months ago
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
(...skipping 105 matching lines...)
// We also have the concept of "super pages" -- these are the underlying system
// allocations we make. Super pages contain multiple partition pages inside them
// and include space for a small amount of metadata per partition page.
// Inside super pages, we store "slot spans". A slot span is a contiguous range
// of one or more partition pages that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up
// against the end of a system page.
static const size_t kPartitionPageShift = 14;  // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;

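// Illustrative sketch (made-up address, not part of this patch): the masks
// above split a pointer into its partition page base and in-page offset:
//
//   uintptr_t addr = 0x20345678;
//   uintptr_t base = addr & kPartitionPageBaseMask;      // 0x20344000
//   uintptr_t offset = addr & kPartitionPageOffsetMask;  // 0x1678
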
// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size.
// What we're actually doing is avoiding filling the full partition page (16 KB)
// with freelist pointers right away. Writing freelist pointers will fault and
// dirty a private page, which is very wasteful if we never actually store
// objects there.
static const size_t kNumSystemPagesPerPartitionPage =
    kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan =
    kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;

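// Worked example, assuming the common 4KB system page size: a 16KB partition
// page holds 16KB / 4KB = 4 system pages, so a slot span covers at most
// 4 * 4 = 16 system pages -- the 64KB max slot span size mentioned earlier.
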
// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
//
// The layout of the super page is as follows. The sizes below are the same
// for 32 bit and 64 bit.
//
//   | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Slot span | Slot span | ... | Slot span | Guard page (4KB) |
//
// - Each slot span is a contiguous range of one or more PartitionPages.
// - The metadata page has the following format. Note that the PartitionPage
//   that is not at the head of a slot span is "unused". In other words,
//   the metadata for the slot span is stored only in the first PartitionPage
//   of the slot span. Metadata accesses to other PartitionPages are
//   redirected to the first PartitionPage.
//
//   | SuperPageExtentEntry (32B) | PartitionPage of slot span 1 (32B, used) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 2 (32B, used) | PartitionPage of slot span 3 (32B, used) | ... | PartitionPage of slot span N (32B, unused) |
//
// A direct mapped page has a similar layout to fake it looking like a super page:
//
//   | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Direct mapped object | Guard page (4KB) |
//
// - The metadata page has the following layout:
//
//   | SuperPageExtentEntry (32B) | PartitionPage (32B) | PartitionBucket (32B) | PartitionDirectMapExtent (8B) |
static const size_t kSuperPageShift = 21;  // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage =
    kSuperPageSize / kPartitionPageSize;

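// Worked example, assuming 4KB system pages: a 2MB super page contains
// 2MB / 16KB = 128 partition pages, and their metadata needs
// 128 * 32B = 4KB -- exactly the single metadata page in the layout above.
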
static const size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;

// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-two size of
// the allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbering starts
// at index 1 for the least-significant-bit.
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
static const size_t kGenericMinBucketedOrder = 4;  // 8 bytes.
static const size_t kGenericMaxBucketedOrder =
    20;  // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
static const size_t kGenericNumBucketedOrders =
    (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
static const size_t kGenericNumBucketsPerOrderBits =
    3;  // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144, 160, ..., 240
static const size_t kGenericNumBucketsPerOrder =
    1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericNumBuckets =
    kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
static const size_t kGenericSmallestBucket = 1
                                             << (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing =
    1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed =
    (1 << (kGenericMaxBucketedOrder - 1)) +
    ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kGenericMinDirectMappedDownsize =
    kGenericMaxBucketed +
    1;  // Limit when downsizing a direct mapping using realloc().
static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT;

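// Worked example (illustrative only): a 96-byte request has its
// most-significant bit at index 7, so its order is 7. Order 7 spans sizes
// 64..127 with 8 buckets spaced (1 << (7 - 1)) / 8 = 8 bytes apart
// (64, 72, ..., 120), so 96 lands exactly on a bucket with no slack.
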
// Constants for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;

// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably it is an "out of virtual address space" crash),
// a special crash stack trace is generated at |partitionOutOfMemory|.
// This is to distinguish "out of virtual address space" from
// "out of physical memory" in crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1GiB

#if ENABLE(ASSERT)
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
static const size_t kCookieSize =
    16;  // Handles alignment up to XMM instructions on Intel.
static const unsigned char kCookieValue[kCookieSize] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
#endif

struct PartitionBucket;
struct PartitionRootBase;

struct PartitionFreelistEntry {
  PartitionFreelistEntry* next;
};

// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no allocated slots, and a decommitted page is an empty page
// that had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
//   return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
//   empty list or it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfil the request.
//   If it does this, full, empty and decommitted pages encountered will be
//   booted out of the active list. If there are no suitable active pages found,
//   an empty or decommitted page (if one exists) will be pulled from the empty
//   list on to the active list.
struct PartitionPage {
  PartitionFreelistEntry* freelistHead;
  PartitionPage* nextPage;
  PartitionBucket* bucket;
  int16_t
      numAllocatedSlots;  // Deliberately signed, 0 for empty or decommitted page, -n for full pages.
  uint16_t numUnprovisionedSlots;
  uint16_t pageOffset;
  int16_t emptyCacheIndex;  // -1 if not in the empty cache.
};

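// Minimal sketch (assumed helpers, not part of this header) showing how the
// numAllocatedSlots encoding above can be read back:
//
//   bool partitionPageIsFull(const PartitionPage* page) {
//     return page->numAllocatedSlots < 0;  // Full pages store -n.
//   }
//   bool partitionPageIsEmptyOrDecommitted(const PartitionPage* page) {
//     return page->numAllocatedSlots == 0;
//   }
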
struct PartitionBucket {
  PartitionPage* activePagesHead;  // Accessed most in hot path => goes first.
  PartitionPage* emptyPagesHead;
  PartitionPage* decommittedPagesHead;
  uint32_t slotSize;
  uint16_t numSystemPagesPerSlotSpan;
  uint16_t numFullPages;
};

266 // An "extent" is a span of consecutive superpages. We link to the partition's 284 // An "extent" is a span of consecutive superpages. We link to the partition's
267 // next extent (if there is one) at the very start of a superpage's metadata 285 // next extent (if there is one) at the very start of a superpage's metadata
268 // area. 286 // area.
269 struct PartitionSuperPageExtentEntry { 287 struct PartitionSuperPageExtentEntry {
270 PartitionRootBase* root; 288 PartitionRootBase* root;
271 char* superPageBase; 289 char* superPageBase;
272 char* superPagesEnd; 290 char* superPagesEnd;
273 PartitionSuperPageExtentEntry* next; 291 PartitionSuperPageExtentEntry* next;
274 }; 292 };
275 293
276 struct PartitionDirectMapExtent { 294 struct PartitionDirectMapExtent {
277 PartitionDirectMapExtent* nextExtent; 295 PartitionDirectMapExtent* nextExtent;
278 PartitionDirectMapExtent* prevExtent; 296 PartitionDirectMapExtent* prevExtent;
279 PartitionBucket* bucket; 297 PartitionBucket* bucket;
280 size_t mapSize; // Mapped size, not including guard pages and meta-data. 298 size_t mapSize; // Mapped size, not including guard pages and meta-data.
281 }; 299 };
282 300
struct WTF_EXPORT PartitionRootBase {
  size_t totalSizeOfCommittedPages;
  size_t totalSizeOfSuperPages;
  size_t totalSizeOfDirectMappedPages;
  // Invariant: totalSizeOfCommittedPages <= totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
  unsigned numBuckets;
  unsigned maxAllocation;
  bool initialized;
  char* nextSuperPage;
  char* nextPartitionPage;
  char* nextPartitionPageEnd;
  PartitionSuperPageExtentEntry* currentExtent;
  PartitionSuperPageExtentEntry* firstExtent;
  PartitionDirectMapExtent* directMapList;
  PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
  int16_t globalEmptyPageRingIndex;
  uintptr_t invertedSelf;

  static SpinLock gInitializedLock;
  static bool gInitialized;
  // gSeedPage is used as a sentinel to indicate that there is no page
  // in the active page list. We could use nullptr, but in that case we would
  // need to add a null-check branch to the hot allocation path. We want to
  // avoid that.
  static PartitionPage gSeedPage;
  static PartitionBucket gPagedBucket;
  // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
  static void (*gOomHandlingFunction)();
};

// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
struct PartitionRoot : public PartitionRootBase {
  // The PartitionAlloc templated class ensures the following is correct.
  ALWAYS_INLINE PartitionBucket* buckets() {
    return reinterpret_cast<PartitionBucket*>(this + 1);
  }
  ALWAYS_INLINE const PartitionBucket* buckets() const {
    return reinterpret_cast<const PartitionBucket*>(this + 1);
  }
};

// Never instantiate a PartitionRootGeneric directly, instead use PartitionAllocatorGeneric.
struct PartitionRootGeneric : public PartitionRootBase {
  SpinLock lock;
  // Some pre-computed constants.
  size_t orderIndexShifts[kBitsPerSizet + 1];
  size_t orderSubIndexMasks[kBitsPerSizet + 1];
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation sizes.
  // It is one flat array instead of a 2D array because in the 2D world, we'd
  // need to index array[blah][max+1] which risks undefined behavior.
  PartitionBucket*
      bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
  PartitionBucket buckets[kGenericNumBuckets];
};

// Flags for partitionAllocGenericFlags.
enum PartitionAllocFlags {
  PartitionAllocReturnNull = 1 << 0,
};

// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
  size_t totalMmappedBytes;  // Total bytes mmapped from the system.
  size_t totalCommittedBytes;  // Total size of committed pages.
  size_t totalResidentBytes;  // Total bytes provisioned by the partition.
  size_t totalActiveBytes;  // Total active bytes in the partition.
  size_t totalDecommittableBytes;  // Total bytes that could be decommitted.
  size_t totalDiscardableBytes;  // Total bytes that could be discarded.
};

// Struct used to retrieve memory statistics about a partition bucket. Used by
// PartitionStatsDumper implementation.
struct PartitionBucketMemoryStats {
  bool isValid;  // Used to check if the stats are valid.
  bool
      isDirectMap;  // True if this is a direct mapping; size will not be unique.
  uint32_t bucketSlotSize;  // The size of the slot in bytes.
  uint32_t
      allocatedPageSize;  // Total size the partition page allocated from the system.
  uint32_t activeBytes;  // Total active bytes used in the bucket.
  uint32_t residentBytes;  // Total bytes provisioned in the bucket.
  uint32_t decommittableBytes;  // Total bytes that could be decommitted.
  uint32_t discardableBytes;  // Total bytes that could be discarded.
  uint32_t numFullPages;  // Number of pages with all slots allocated.
  uint32_t
      numActivePages;  // Number of pages that have at least one provisioned slot.
  uint32_t
      numEmptyPages;  // Number of pages that are empty but not decommitted.
  uint32_t
      numDecommittedPages;  // Number of pages that are empty and decommitted.
};

// Interface that is passed to partitionDumpStats and
// partitionDumpStatsGeneric for using the memory statistics.
class WTF_EXPORT PartitionStatsDumper {
 public:
  // Called to dump total memory used by partition, once per partition.
  virtual void partitionDumpTotals(const char* partitionName,
                                   const PartitionMemoryStats*) = 0;

  // Called to dump stats about buckets, for each bucket.
  virtual void partitionsDumpBucketStats(const char* partitionName,
                                         const PartitionBucketMemoryStats*) = 0;
};

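// Minimal sketch (assumed example, not part of this header) of implementing
// the interface above, e.g. to log per-partition totals:
//
//   class LoggingStatsDumper final : public PartitionStatsDumper {
//    public:
//     void partitionDumpTotals(const char* partitionName,
//                              const PartitionMemoryStats* stats) override {
//       printf("%s: %zu committed bytes\n", partitionName,
//              stats->totalCommittedBytes);
//     }
//     void partitionsDumpBucketStats(
//         const char* partitionName,
//         const PartitionBucketMemoryStats* stats) override {}
//   };
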
WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
WTF_EXPORT void partitionAllocInit(PartitionRoot*,
                                   size_t numBuckets,
                                   size_t maxAllocation);
WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);

enum PartitionPurgeFlags {
  // Decommitting the ring list of empty pages is reasonably fast.
  PartitionPurgeDecommitEmptyPages = 1 << 0,
  // Discarding unused system pages is slower, because it involves walking all
  // freelists in all active partition pages of all buckets >= system page
  // size. It often frees a similar amount of memory to decommitting the empty
  // pages, though.
  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};

WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);

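// Usage sketch (illustrative only): the flags above combine as a bitmask in
// the int parameter, e.g.
//
//   partitionPurgeMemoryGeneric(root, PartitionPurgeDecommitEmptyPages |
//                                         PartitionPurgeDiscardUnusedSystemPages);
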
WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*,
                                                     int,
                                                     size_t,
                                                     PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*,
                                                      void*,
                                                      size_t,
                                                      const char* typeName);

WTF_EXPORT void partitionDumpStats(PartitionRoot*,
                                   const char* partitionName,
                                   bool isLightDump,
                                   PartitionStatsDumper*);
WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*,
                                          const char* partitionName,
                                          bool isLightDump,
                                          PartitionStatsDumper*);

class WTF_EXPORT PartitionAllocHooks {
 public:
  typedef void AllocationHook(void* address, size_t, const char* typeName);
  typedef void FreeHook(void* address);

  static void setAllocationHook(AllocationHook* hook) {
    m_allocationHook = hook;
  }
  static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }

  static void allocationHookIfEnabled(void* address,
                                      size_t size,
                                      const char* typeName) {
    AllocationHook* allocationHook = m_allocationHook;
    if (UNLIKELY(allocationHook != nullptr))
      allocationHook(address, size, typeName);
  }

  static void freeHookIfEnabled(void* address) {
    FreeHook* freeHook = m_freeHook;
    if (UNLIKELY(freeHook != nullptr))
      freeHook(address);
  }

  static void reallocHookIfEnabled(void* oldAddress,
                                   void* newAddress,
                                   size_t size,
                                   const char* typeName) {
    // Report a reallocation as a free followed by an allocation.
    AllocationHook* allocationHook = m_allocationHook;
    FreeHook* freeHook = m_freeHook;
    if (UNLIKELY(allocationHook && freeHook)) {
      freeHook(oldAddress);
      allocationHook(newAddress, size, typeName);
    }
  }

 private:
  // Pointers to hook functions that PartitionAlloc will call on allocation and
  // free if the pointers are non-null.
  static AllocationHook* m_allocationHook;
  static FreeHook* m_freeHook;
};

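// Usage sketch (assumed example hooks): registering a logging pair.
//
//   static void myAllocHook(void* address, size_t size, const char* typeName) {
//     // e.g. record (address, size, typeName) in a profiler.
//   }
//   static void myFreeHook(void* address) {}
//
//   PartitionAllocHooks::setAllocationHook(&myAllocHook);
//   PartitionAllocHooks::setFreeHook(&myFreeHook);
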
// In official builds, do not include type info string literals to avoid
// bloating the binary.
#if defined(OFFICIAL_BUILD)
#define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr
#else
#define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>()
#endif

ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(
    PartitionFreelistEntry* ptr) {
  // We use bswap on little endian as a fast mask for two reasons:
  // 1) If an object is freed and its vtable used where the attacker doesn't
  // get the chance to run allocations between the free and use, the vtable
  // dereference is likely to fault.
  // 2) If the attacker has a linear buffer overflow and elects to try and
  // corrupt a freelist pointer, partial pointer overwrite attacks are
  // thwarted.
  // For big endian, similar guarantees are arrived at with a negation.
#if CPU(BIG_ENDIAN)
  uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
  uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
  return reinterpret_cast<PartitionFreelistEntry*>(masked);
}

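// Worked example (little endian, 64-bit, made-up pointer): the byte swap is
// an involution, so masking twice restores the original value:
//
//   uintptr_t p = 0x00007f1234568000;  // plausible user-space pointer
//   uintptr_t m = bswapuintptrt(p);    // 0x00805634127f0000, likely to fault
//   bswapuintptrt(m) == p;             // true
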
ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) {
#if ENABLE(ASSERT)
  // Add space for cookies, checking for integer overflow.
  ASSERT(size + (2 * kCookieSize) > size);
  size += 2 * kCookieSize;
#endif
  return size;
}

ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) {
#if ENABLE(ASSERT)
  // Remove space for cookies.
  ASSERT(size >= 2 * kCookieSize);
  size -= 2 * kCookieSize;
#endif
  return size;
}

ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) {
#if ENABLE(ASSERT)
  // The value given to the application is actually just after the cookie.
  ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
  return ptr;
}

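// Slot layout in ENABLE(ASSERT) builds, following from the adjustments above:
// a request for N bytes occupies N + 2 * kCookieSize bytes, and the pointer
// handed to the application points just past the leading cookie:
//
//   | cookie (16B) | application data (N bytes) | cookie (16B) |
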
ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) {
#if ENABLE(ASSERT)
  unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
    *cookiePtr = kCookieValue[i];
#endif
}

ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) {
#if ENABLE(ASSERT)
  unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
    ASSERT(*cookiePtr == kCookieValue[i]);
#endif
}

ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
  ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}

ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
  char* superPagePtr =
      reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
  uintptr_t partitionPageIndex =
      (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
  // Index 0 is invalid because it is the metadata and guard area and
  // the last index is invalid because it is a guard page.
  ASSERT(partitionPageIndex);
  ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
  PartitionPage* page = reinterpret_cast<PartitionPage*>(
      partitionSuperPageToMetadataArea(superPagePtr) +
      (partitionPageIndex << kPageMetadataShift));
  // Partition pages in the same slot span can share the same page object. Adjust for that.
  size_t delta = page->pageOffset << kPageMetadataShift;
  page =
      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
  return page;
}

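// Worked example (made-up offset, 4KB system pages assumed): for a pointer
// 0x30000 bytes into its super page, partitionPageIndex = 0x30000 >> 14 = 12,
// so the PartitionPage entry sits 12 << 5 = 384 bytes into the metadata page;
// the pageOffset step then walks back to the head of the slot span if
// partition page 12 is not the first page of its span.
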
ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) {
  uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
  uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
  ASSERT(superPageOffset > kSystemPageSize);
  ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
                                              kPageMetadataSize));
  uintptr_t partitionPageIndex =
      (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
  // Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
  ASSERT(partitionPageIndex);
  ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
  uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
  void* ret = reinterpret_cast<void*>(
      superPageBase + (partitionPageIndex << kPartitionPageShift));
  return ret;
}

ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) {
  PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
  // Checks that the pointer is a multiple of bucket size.
  ASSERT(!((reinterpret_cast<uintptr_t>(ptr) -
            reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) %
           page->bucket->slotSize));
  return page;
}

ALWAYS_INLINE bool partitionBucketIsDirectMapped(
    const PartitionBucket* bucket) {
  return !bucket->numSystemPagesPerSlotSpan;
}

ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) {
  return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
}

ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) {
  return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
}

ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) {
  // For single-slot buckets which span more than one partition page, we
  // have some spare metadata space to store the raw allocation size. We
  // can use this to report better statistics.
  PartitionBucket* bucket = page->bucket;
  if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
    return nullptr;

  ASSERT((bucket->slotSize % kSystemPageSize) == 0);
  ASSERT(partitionBucketIsDirectMapped(bucket) ||
         partitionBucketSlots(bucket) == 1);
  page++;
  return reinterpret_cast<size_t*>(&page->freelistHead);
}

ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) {
  size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
  if (UNLIKELY(rawSizePtr != nullptr))
    return *rawSizePtr;
  return 0;
}

ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) {
  PartitionSuperPageExtentEntry* extentEntry =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
  return extentEntry->root;
}

ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) {
  PartitionPage* page = partitionPointerToPage(ptr);
  PartitionRootBase* root = partitionPageToRoot(page);
  return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
}

ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root,
                                         int flags,
                                         size_t size,
                                         PartitionBucket* bucket) {
  PartitionPage* page = bucket->activePagesHead;
  // Check that this page is neither full nor freed.
  ASSERT(page->numAllocatedSlots >= 0);
  void* ret = page->freelistHead;
  if (LIKELY(ret != 0)) {
    // If these asserts fire, you probably corrupted memory.
    ASSERT(partitionPointerIsValid(ret));
    // All large allocations must go through the slow path to correctly
    // update the size metadata.
    ASSERT(partitionPageGetRawSize(page) == 0);
    PartitionFreelistEntry* newHead =
        partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
    page->freelistHead = newHead;
    page->numAllocatedSlots++;
  } else {
    ret = partitionAllocSlowPath(root, flags, size, bucket);
    ASSERT(!ret || partitionPointerIsValid(ret));
  }
#if ENABLE(ASSERT)
  if (!ret)
    return 0;
  // Fill the uninitialized pattern, and write the cookies.
  page = partitionPointerToPage(ret);
  size_t slotSize = page->bucket->slotSize;
  size_t rawSize = partitionPageGetRawSize(page);
  if (rawSize) {
    ASSERT(rawSize == size);
    slotSize = rawSize;
  }
  size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
  char* charRet = static_cast<char*>(ret);
  // The value given to the application is actually just after the cookie.
  ret = charRet + kCookieSize;
  memset(ret, kUninitializedByte, noCookieSize);
  partitionCookieWriteValue(charRet);
  partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
#endif
  return ret;
}

ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root,
                                   size_t size,
                                   const char* typeName) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  void* result = malloc(size);
  RELEASE_ASSERT(result);
  return result;
#else
  size_t requestedSize = size;
  size = partitionCookieSizeAdjustAdd(size);
  ASSERT(root->initialized);
  size_t index = size >> kBucketShift;
  ASSERT(index < root->numBuckets);
  ASSERT(size == index << kBucketShift);
  PartitionBucket* bucket = &root->buckets()[index];
  void* result = partitionBucketAlloc(root, 0, size, bucket);
  PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
  return result;
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}

ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) {
  // If these asserts fire, you probably corrupted memory.
#if ENABLE(ASSERT)
  size_t slotSize = page->bucket->slotSize;
  size_t rawSize = partitionPageGetRawSize(page);
  if (rawSize)
    slotSize = rawSize;
  partitionCookieCheckValue(ptr);
  partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize -
                            kCookieSize);
  memset(ptr, kFreedByte, slotSize);
#endif
  ASSERT(page->numAllocatedSlots);
  PartitionFreelistEntry* freelistHead = page->freelistHead;
  ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
  RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(
      ptr != freelistHead);  // Catches an immediate double free.
  ASSERT_WITH_SECURITY_IMPLICATION(
      !freelistHead ||
      ptr !=
          partitionFreelistMask(
              freelistHead
                  ->next));  // Look for double free one level deeper in debug.
  PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
  entry->next = partitionFreelistMask(freelistHead);
  page->freelistHead = entry;
  --page->numAllocatedSlots;
  if (UNLIKELY(page->numAllocatedSlots <= 0)) {
    partitionFreeSlowPath(page);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the size metadata.
    ASSERT(partitionPageGetRawSize(page) == 0);
  }
}

ALWAYS_INLINE void partitionFree(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  PartitionAllocHooks::freeHookIfEnabled(ptr);
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  partitionFreeWithPage(ptr, page);
#endif
}

689 ASSERT(page->numAllocatedSlots); 775 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
690 PartitionFreelistEntry* freelistHead = page->freelistHead; 776 PartitionRootGeneric* root,
691 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); 777 size_t size) {
692 RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ptr != freelistHead); // Catches an immediate double free. 778 size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
693 ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMa sk(freelistHead->next)); // Look for double free one level deeper in debug. 779 // The order index is simply the next few bits after the most significant bit.
694 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); 780 size_t orderIndex = (size >> root->orderIndexShifts[order]) &
695 entry->next = partitionFreelistMask(freelistHead); 781 (kGenericNumBucketsPerOrder - 1);
696 page->freelistHead = entry; 782 // And if the remaining bits are non-zero we must bump the bucket up.
697 --page->numAllocatedSlots; 783 size_t subOrderIndex = size & root->orderSubIndexMasks[order];
698 if (UNLIKELY(page->numAllocatedSlots <= 0)) { 784 PartitionBucket* bucket =
699 partitionFreeSlowPath(page); 785 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
700 } else { 786 orderIndex + !!subOrderIndex];
701 // All single-slot allocations must go through the slow path to 787 ASSERT(!bucket->slotSize || bucket->slotSize >= size);
702 // correctly update the size metadata. 788 ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
703 ASSERT(partitionPageGetRawSize(page) == 0); 789 return bucket;
704 } 790 }
705 } 791
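
// A minimal sketch of the LIFO freelist push performed above, assuming
// partitionFreelistMask() obfuscates stored next pointers (its definition is
// not part of this excerpt). Hypothetical and kept out of the build.
#if 0
static void freelistPushSketch(PartitionPage* page, void* slot) {
  PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(slot);
  // The freed slot stores the (masked) old head and becomes the new head.
  entry->next = partitionFreelistMask(page->freelistHead);
  page->freelistHead = entry;
}
#endif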

ALWAYS_INLINE void partitionFree(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  PartitionAllocHooks::freeHookIfEnabled(ptr);
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  partitionFreeWithPage(ptr, page);
#endif
}
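
// A hypothetical end-to-end use of the size-specific API above; "root" is
// assumed to be an already-initialized PartitionRoot. Kept out of the build.
#if 0
static void allocFreeSketch(PartitionRoot* root) {
  void* p = partitionAlloc(root, 64, "ExampleType");
  // ... use the 64 usable bytes at p ...
  partitionFree(p);
}
#endif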

ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
    PartitionRootGeneric* root,
    size_t size) {
  size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
  // The order index is simply the next few bits after the most significant
  // bit.
  size_t orderIndex = (size >> root->orderIndexShifts[order]) &
                      (kGenericNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
  size_t subOrderIndex = size & root->orderSubIndexMasks[order];
  PartitionBucket* bucket =
      root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
                          orderIndex + !!subOrderIndex];
  ASSERT(!bucket->slotSize || bucket->slotSize >= size);
  ASSERT(!(bucket->slotSize % kGenericSmallestBucket));
  return bucket;
}
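
// A worked example of the decomposition above, under assumed constants (this
// excerpt does not give kGenericNumBucketsPerOrder; take it to be 8): for a
// request of 800 bytes (0b1100100000), the most significant bit is bit 9, so
// order == 10; the next three bits are 0b100, so orderIndex == 4; and the
// remaining low bits (0b100000) are non-zero, so the lookup is bumped by one
// bucket to guarantee slotSize >= size (768-byte bucket -> 832-byte bucket).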

ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root,
                                               int flags,
                                               size_t size,
                                               const char* typeName) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  void* result = malloc(size);
  RELEASE_ASSERT(result);
  return result;
#else
  ASSERT(root->initialized);
  size_t requestedSize = size;
  size = partitionCookieSizeAdjustAdd(size);
  PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
  void* ret = nullptr;
  {
    SpinLock::Guard guard(root->lock);
    // TODO(bashi): Remove the following RELEASE_ASSERT()s once we find the
    // cause of http://crbug.com/514141
#if OS(ANDROID)
    RELEASE_ASSERT(bucket >= &root->buckets[0] ||
                   bucket == &PartitionRootGeneric::gPagedBucket);
    RELEASE_ASSERT(bucket <= &root->buckets[kGenericNumBuckets - 1] ||
                   bucket == &PartitionRootGeneric::gPagedBucket);
    RELEASE_ASSERT(root->initialized);
#endif
    ret = partitionBucketAlloc(root, flags, size, bucket);
  }
  PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
  return ret;
#endif
}
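
// Note that the allocation hook above is invoked after the SpinLock guard's
// scope has closed: only partitionBucketAlloc() executes under the partition
// lock, so hook code never runs while the lock is held.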

ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root,
                                          size_t size,
                                          const char* typeName) {
  return partitionAllocGenericFlags(root, 0, size, typeName);
}

ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  free(ptr);
#else
  ASSERT(root->initialized);

  if (UNLIKELY(!ptr))
    return;

  PartitionAllocHooks::freeHookIfEnabled(ptr);
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  {
    SpinLock::Guard guard(root->lock);
    partitionFreeWithPage(ptr, page);
  }
#endif
}
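
// A hypothetical round trip through the generic API above; "root" is assumed
// to be an already-initialized PartitionRootGeneric. Kept out of the build.
#if 0
static void genericRoundTripSketch(PartitionRootGeneric* root) {
  void* p = partitionAllocGeneric(root, 1000, "ExampleBuffer");
  // ... use p ...
  partitionFreeGeneric(root, p);  // Passing nullptr would be a no-op.
}
#endif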

ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) {
  // Caller must check that the size is not above the kGenericMaxDirectMapped
  // limit before calling. This also guards against integer overflow in the
  // calculation here.
  ASSERT(size <= kGenericMaxDirectMapped);
  return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
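
// As a worked example, assuming 4KB system pages (kSystemPageOffsetMask ==
// 0xfff): a 5000-byte request becomes (5000 + 4095) & ~4095 == 8192, i.e. it
// is rounded up to two whole system pages.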

ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root,
                                              size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return size;
#else
  ASSERT(root->initialized);
  size = partitionCookieSizeAdjustAdd(size);
  PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
  if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
    size = bucket->slotSize;
  } else if (size > kGenericMaxDirectMapped) {
    // Too large to allocate => return the size unchanged.
  } else {
    ASSERT(bucket == &PartitionRootBase::gPagedBucket);
    size = partitionDirectMapSize(size);
  }
  return partitionCookieSizeAdjustSubtract(size);
#endif
}
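
// A hypothetical use of partitionAllocActualSize() to request exactly the
// capacity the allocator would hand back anyway, so no slot space is wasted.
// "root" is assumed initialized; kept out of the build.
#if 0
static void* fullSlotAllocSketch(PartitionRootGeneric* root,
                                 size_t wanted,
                                 size_t* actualOut) {
  *actualOut = partitionAllocActualSize(root, wanted);
  return partitionAllocGeneric(root, *actualOut, "ExampleBacking");
}
#endif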

ALWAYS_INLINE bool partitionAllocSupportsGetSize() {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  return false;
#else
  return true;
#endif
}

ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) {
  // No need to lock here. Only 'ptr' being freed by another thread could
  // cause trouble, and the caller is responsible for that not happening.
  ASSERT(partitionAllocSupportsGetSize());
  ptr = partitionCookieFreePointerAdjust(ptr);
  ASSERT(partitionPointerIsValid(ptr));
  PartitionPage* page = partitionPointerToPage(ptr);
  size_t size = page->bucket->slotSize;
  return partitionCookieSizeAdjustSubtract(size);
}
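
// A hypothetical sketch showing that partitionAllocGetSize() reports the
// usable slot capacity, which may exceed the requested size. Kept out of the
// build.
#if 0
static void getSizeSketch(PartitionRootGeneric* root) {
  if (!partitionAllocSupportsGetSize())
    return;
  void* p = partitionAllocGeneric(root, 100, "ExampleType");
  size_t usable = partitionAllocGetSize(p);  // >= 100 (full slot capacity).
  (void)usable;
  partitionFreeGeneric(root, p);
}
#endif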

// N (or more accurately, N - sizeof(void*)) represents the largest size in
// bytes that will be handled by a SizeSpecificPartitionAllocator.
// Attempts to partitionAlloc() more than this amount will fail.
template <size_t N>
class SizeSpecificPartitionAllocator {
 public:
  static const size_t kMaxAllocation = N - kAllocationGranularity;
  static const size_t kNumBuckets = N / kAllocationGranularity;
  void init() {
    partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation);
  }
  bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
  ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }

 private:
  PartitionRoot m_partitionRoot;
  PartitionBucket m_actualBuckets[kNumBuckets];
};
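
// A hypothetical lifecycle for the class above; the template argument and
// type name are illustrative only. Kept out of the build.
#if 0
static void sizeSpecificLifecycleSketch() {
  SizeSpecificPartitionAllocator<1024> allocator;
  allocator.init();
  void* p = partitionAlloc(allocator.root(), 64, "ExampleNode");
  partitionFree(p);
  bool leakFree = allocator.shutdown();  // True if everything was freed.
  (void)leakFree;
}
#endif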

class PartitionAllocatorGeneric {
 public:
  void init() { partitionAllocGenericInit(&m_partitionRoot); }
  bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
  ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }

 private:
  PartitionRootGeneric m_partitionRoot;
};
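
// The generic counterpart of the lifecycle sketch above; hypothetical and
// kept out of the build.
#if 0
static void genericLifecycleSketch() {
  PartitionAllocatorGeneric allocator;
  allocator.init();
  void* p = partitionAllocGeneric(allocator.root(), 4096, "ExampleBlob");
  partitionFreeGeneric(allocator.root(), p);
  bool leakFree = allocator.shutdown();
  (void)leakFree;
}
#endif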

}  // namespace WTF

using WTF::SizeSpecificPartitionAllocator;
using WTF::PartitionAllocatorGeneric;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;
using WTF::partitionAllocActualSize;
using WTF::partitionAllocSupportsGetSize;
using WTF::partitionAllocGetSize;

#endif  // WTF_PartitionAlloc_h