| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 48 matching lines...) |
| 59 // blinkPagesPerRegion at a given random address. We group pages to | 59 // blinkPagesPerRegion at a given random address. We group pages to |
| 60 // not spread out too much over the address space, which would blow | 60 // not spread out too much over the address space, which would blow |
| 61 // away the page tables and lead to bad performance. | 61 // away the page tables and lead to bad performance. |
| 62 const size_t blinkPagesPerRegion = 10; | 62 const size_t blinkPagesPerRegion = 10; |
| 63 | 63 |
| 64 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte | 64 // Double precision floats are more efficient when 8 byte aligned, so we 8 byte |
| 65 // align all allocations even on 32 bit. | 65 // align all allocations even on 32 bit. |
| 66 const size_t allocationGranularity = 8; | 66 const size_t allocationGranularity = 8; |
| 67 const size_t allocationMask = allocationGranularity - 1; | 67 const size_t allocationMask = allocationGranularity - 1; |
| 68 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); | 68 const size_t objectStartBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); |
| 69 const size_t reservedForObjectBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); | 69 const size_t reservedForObjectStartBitMap = ((objectStartBitMapSize + allocationMask) & ~allocationMask); |
| 70 const size_t objectMarkBitMapSize = (blinkPageSize + ((8 * allocationGranularity) - 1)) / (8 * allocationGranularity); |
| 71 const size_t reservedForObjectMarkBitMap = ((objectMarkBitMapSize + allocationMask) & ~allocationMask); |
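A quick worked check of the two bitmap reservations above (standalone sketch; it assumes blinkPageSize is 1 << 17, its usual value in Blink, defined outside this hunk): one bit covers one 8-byte allocation slot, so each bitmap needs blinkPageSize / (8 * 8) = 2048 bytes, and 2048 is already a multiple of allocationGranularity, so the rounding with allocationMask leaves it unchanged.

    #include <cstddef>

    const size_t kAssumedBlinkPageSize = 1 << 17; // 128 KiB; an assumption, not from this hunk
    const size_t kGranularity = 8;                // allocationGranularity
    const size_t kBitMapBytes =
        (kAssumedBlinkPageSize + 8 * kGranularity - 1) / (8 * kGranularity);
    static_assert(kBitMapBytes == 2048, "one bit per 8-byte slot of a 128 KiB page");
    static_assert(!(kBitMapBytes & (kGranularity - 1)), "already granularity-aligned");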
| 70 const size_t maxHeapObjectSizeLog2 = 27; | 72 const size_t maxHeapObjectSizeLog2 = 27; |
| 71 const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2; | 73 const size_t maxHeapObjectSize = 1 << maxHeapObjectSizeLog2; |
| 72 | 74 |
| 73 const size_t markBitMask = 1; | |
| 74 const size_t freeListMask = 2; | 75 const size_t freeListMask = 2; |
| 75 // The dead bit is used for objects that have gone through a GC marking, but did | 76 // The dead bit is used for objects that have gone through a GC marking, but did |
| 76 // not get swept before a new GC started. In that case we set the dead bit on | 77 // not get swept before a new GC started. In that case we set the dead bit on |
| 77 // objects that were not marked in the previous GC to ensure we are not tracing | 78 // objects that were not marked in the previous GC to ensure we are not tracing |
| 78 // them via a conservatively found pointer. Tracing dead objects could lead to | 79 // them via a conservatively found pointer. Tracing dead objects could lead to |
| 79 // tracing of already finalized objects in another thread's heap which is a | 80 // tracing of already finalized objects in another thread's heap which is a |
| 80 // use-after-free situation. | 81 // use-after-free situation. |
| 81 const size_t deadBitMask = 4; | 82 const size_t deadBitMask = 4; |
| 82 // On free-list entries we reuse the dead bit to distinguish a normal free-list | 83 // On free-list entries we reuse the dead bit to distinguish a normal free-list |
| 83 // entry from one that has been promptly freed. | 84 // entry from one that has been promptly freed. |
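The markBitMask deleted above occupied bit 0 of the encoded size; with marking moved to per-page bitmaps, only the free-list and dead bits remain in the header. Because every object size is a multiple of allocationGranularity (8), the low three bits of the size are always zero and can carry these flags. A minimal sketch of that packing (illustrative only, not the patch's code):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const size_t freeListMask = 2;
        const size_t deadBitMask = 4;
        const size_t sizeMask = ~static_cast<size_t>(7);

        size_t encoded = 64;       // any multiple of 8 leaves bits 0-2 clear
        encoded |= deadBitMask;    // set the dead bit
        assert(encoded & deadBitMask);
        assert(!(encoded & freeListMask));
        assert((encoded & sizeMask) == 64); // the size is still recoverable
        return 0;
    }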
| (...skipping 88 matching lines...) |
| 172 // In order to use the same memory allocation routines for everything | 173 // In order to use the same memory allocation routines for everything |
| 173 // allocated in the heap, large objects are considered heap pages | 174 // allocated in the heap, large objects are considered heap pages |
| 174 // containing only one object. | 175 // containing only one object. |
| 175 // | 176 // |
| 176 // The layout of a large heap object is as follows: | 177 // The layout of a large heap object is as follows: |
| 177 // | 178 // |
| 178 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | | 179 // | BaseHeapPage | next pointer | FinalizedHeapObjectHeader or HeapObjectHeader | payload | |
| 179 template<typename Header> | 180 template<typename Header> |
| 180 class LargeHeapObject : public BaseHeapPage { | 181 class LargeHeapObject : public BaseHeapPage { |
| 181 public: | 182 public: |
| 182 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) : BaseHeapPage(storage, gcInfo, state) | 183 LargeHeapObject(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
| 184 : BaseHeapPage(storage, gcInfo, state) |
| 185 , m_next(nullptr) |
| 186 , m_marked(false) |
| 183 { | 187 { |
| 184 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); | 188 COMPILE_ASSERT(!(sizeof(LargeHeapObject<Header>) & allocationMask), large_heap_object_header_misaligned); |
| 185 } | 189 } |
| 186 | 190 |
| 187 virtual void checkAndMarkPointer(Visitor*, Address) override; | 191 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 188 virtual bool isLargeObject() override { return true; } | 192 virtual bool isLargeObject() override { return true; } |
| 189 | 193 |
| 194 virtual void markObject(Address address) override |
| 195 { |
| 196 ASSERT(!m_marked); |
| 197 m_marked = true; |
| 198 } |
| 199 |
| 200 virtual bool objectIsMarked(Address address) override |
| 201 { |
| 202 return m_marked; |
| 203 } |
| 204 |
| 205 void clearObjectMarkBitMap() |
| 206 { |
| 207 m_marked = false; |
| 208 } |
| 209 |
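A design note on the block above: a LargeHeapObject page holds exactly one object, so a single m_marked bool stands in for the per-slot bitmap that HeapPage gains below, and the Address parameter appears to exist only to satisfy the shared BaseHeapPage interface. Note also that markObject asserts the object is not already marked, so callers are expected to test objectIsMarked first.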
| 190 #if ENABLE(GC_PROFILE_MARKING) | 210 #if ENABLE(GC_PROFILE_MARKING) |
| 191 virtual const GCInfo* findGCInfo(Address address) | 211 virtual const GCInfo* findGCInfo(Address address) |
| 192 { | 212 { |
| 193 if (!objectContains(address)) | 213 if (!objectContains(address)) |
| 194 return 0; | 214 return 0; |
| 195 return gcInfo(); | 215 return gcInfo(); |
| 196 } | 216 } |
| 197 #endif | 217 #endif |
| 198 | 218 |
| 199 #if ENABLE(GC_PROFILE_HEAP) | 219 #if ENABLE(GC_PROFILE_HEAP) |
| (...skipping 38 matching lines...) |
| 238 | 258 |
| 239 Address payload() { return heapObjectHeader()->payload(); } | 259 Address payload() { return heapObjectHeader()->payload(); } |
| 240 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 260 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
| 241 | 261 |
| 242 Header* heapObjectHeader() | 262 Header* heapObjectHeader() |
| 243 { | 263 { |
| 244 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 264 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 245 return reinterpret_cast<Header*>(headerAddress); | 265 return reinterpret_cast<Header*>(headerAddress); |
| 246 } | 266 } |
| 247 | 267 |
| 268 void mark(Visitor*); |
| 248 bool isMarked(); | 269 bool isMarked(); |
| 249 void unmark(); | 270 |
| 250 size_t objectPayloadSizeForTesting(); | 271 size_t objectPayloadSizeForTesting(); |
| 251 void mark(Visitor*); | |
| 252 void finalize(); | 272 void finalize(); |
| 253 void setDeadMark(); | 273 void setDeadMark(); |
| 254 virtual void markOrphaned() | 274 virtual void markOrphaned() |
| 255 { | 275 { |
| 256 // Zap the payload with a recognizable value to detect any incorrect | 276 // Zap the payload with a recognizable value to detect any incorrect |
| 257 // cross thread pointer usage. | 277 // cross thread pointer usage. |
| 258 memset(payload(), orphanedZapValue, payloadSize()); | 278 memset(payload(), orphanedZapValue, payloadSize()); |
| 259 BaseHeapPage::markOrphaned(); | 279 BaseHeapPage::markOrphaned(); |
| 260 } | 280 } |
| 261 | 281 |
| 262 private: | 282 private: |
| 263 friend class ThreadHeap<Header>; | 283 friend class ThreadHeap<Header>; |
| 264 | 284 |
| 265 LargeHeapObject<Header>* m_next; | 285 LargeHeapObject<Header>* m_next; |
| 286 bool m_marked; |
| 266 }; | 287 }; |
| 267 | 288 |
| 268 // The BasicObjectHeader is the minimal object header. It is used when | 289 // The BasicObjectHeader is the minimal object header. It is used when |
| 269 // encountering heap space of size allocationGranularity to mark it as | 290 // encountering heap space of size allocationGranularity to mark it as |
| 270 // a freelist entry. | 291 // a freelist entry. |
| 271 class PLATFORM_EXPORT BasicObjectHeader { | 292 class PLATFORM_EXPORT BasicObjectHeader { |
| 272 public: | 293 public: |
| 273 NO_SANITIZE_ADDRESS | 294 NO_SANITIZE_ADDRESS |
| 274 explicit BasicObjectHeader(size_t encodedSize) | 295 explicit BasicObjectHeader(size_t encodedSize) |
| 275 : m_size(encodedSize) { } | 296 : m_size(encodedSize) { } |
| (...skipping 57 matching lines...) |
| 333 | 354 |
| 334 NO_SANITIZE_ADDRESS | 355 NO_SANITIZE_ADDRESS |
| 335 HeapObjectHeader(size_t encodedSize, const GCInfo*) | 356 HeapObjectHeader(size_t encodedSize, const GCInfo*) |
| 336 : BasicObjectHeader(encodedSize) | 357 : BasicObjectHeader(encodedSize) |
| 337 #if ENABLE(ASSERT) | 358 #if ENABLE(ASSERT) |
| 338 , m_magic(magic) | 359 , m_magic(magic) |
| 339 #endif | 360 #endif |
| 340 { } | 361 { } |
| 341 | 362 |
| 342 inline void checkHeader() const; | 363 inline void checkHeader() const; |
| 343 inline bool isMarked() const; | |
| 344 | 364 |
| 345 inline void mark(); | 365 inline void mark(); |
| 346 inline void unmark(); | 366 inline bool isMarked() const; |
| 347 | 367 |
| 348 inline const GCInfo* gcInfo() { return 0; } | 368 inline const GCInfo* gcInfo() { return 0; } |
| 349 | 369 |
| 350 inline Address payload(); | 370 inline Address payload(); |
| 351 inline size_t payloadSize(); | 371 inline size_t payloadSize(); |
| 352 inline Address payloadEnd(); | 372 inline Address payloadEnd(); |
| 353 | 373 |
| 354 inline void setDeadMark(); | 374 inline void setDeadMark(); |
| 355 inline void clearDeadMark(); | 375 inline void clearDeadMark(); |
| 356 inline bool hasDeadMark() const; | 376 inline bool hasDeadMark() const; |
| (...skipping 133 matching lines...) |
| 490 template<typename Header> | 510 template<typename Header> |
| 491 class HeapPage : public BaseHeapPage { | 511 class HeapPage : public BaseHeapPage { |
| 492 public: | 512 public: |
| 493 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); | 513 HeapPage(PageMemory*, ThreadHeap<Header>*, const GCInfo*); |
| 494 | 514 |
| 495 void link(HeapPage**); | 515 void link(HeapPage**); |
| 496 static void unlink(ThreadHeap<Header>*, HeapPage*, HeapPage**); | 516 static void unlink(ThreadHeap<Header>*, HeapPage*, HeapPage**); |
| 497 | 517 |
| 498 bool isEmpty(); | 518 bool isEmpty(); |
| 499 | 519 |
| 520 virtual void markObject(Address object) override |
| 521 { |
| 522 size_t offset = (object - payload()) / allocationGranularity; |
| 523 ASSERT(!((object - payload()) & allocationMask)); |
| 524 ASSERT(!(m_objectMarkBitMap[offset / 8] & (1 << (offset & 7)))); |
| 525 m_objectMarkBitMap[offset / 8] |= (1 << (offset & 7)); |
| 526 } |
| 527 |
| 528 virtual bool objectIsMarked(Address object) override |
| 529 { |
| 530 size_t offset = (object - payload()) / allocationGranularity; |
| 531 ASSERT(!((object - payload()) & allocationMask)); |
| 532 return m_objectMarkBitMap[offset / 8] & (1 << (offset & 7)); |
| 533 } |
| 534 |
| 535 void clearObjectMarkBitMap(); |
| 536 |
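A worked example of the indexing used by markObject and objectIsMarked above (standalone sketch; the real code operates on m_objectMarkBitMap): an object starting 24 bytes into the payload occupies slot 24 / 8 = 3, so its mark is bit 3 of byte 0 of the bitmap.

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main()
    {
        unsigned char bitmap[2048];
        memset(bitmap, 0, sizeof(bitmap));

        size_t byteOffset = 24;              // object start relative to payload()
        size_t slot = byteOffset / 8;        // 3: one slot per 8-byte granule
        bitmap[slot / 8] |= 1 << (slot & 7); // set bit 3 of bitmap[0]
        assert(bitmap[slot / 8] & (1 << (slot & 7)));
        return 0;
    }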
| 500 // Returns true for the whole blinkPageSize page that the page is on, even | 537 // Returns true for the whole blinkPageSize page that the page is on, even |
| 501 // for the header, and the unmapped guard page at the start. That ensures | 538 // for the header, and the unmapped guard page at the start. That ensures |
| 502 // the result can be used to populate the negative page cache. | 539 // the result can be used to populate the negative page cache. |
| 503 virtual bool contains(Address addr) override | 540 virtual bool contains(Address addr) override |
| 504 { | 541 { |
| 505 Address blinkPageStart = roundToBlinkPageStart(address()); | 542 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 506 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. | 543 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. |
| 507 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 544 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 508 } | 545 } |
| 509 | 546 |
| 510 HeapPage* next() { return m_next; } | 547 HeapPage* next() { return m_next; } |
| 511 | 548 |
| 512 Address payload() | 549 Address payload() |
| 513 { | 550 { |
| 514 return address() + sizeof(*this) + headerPadding<Header>(); | 551 return address() + sizeof(*this) + headerPadding<Header>(); |
| 515 } | 552 } |
| 516 | 553 |
| 517 static size_t payloadSize() | 554 static size_t payloadSize() |
| 518 { | 555 { |
| 519 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 556 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
| 520 } | 557 } |
| 521 | 558 |
| 522 Address end() { return payload() + payloadSize(); } | 559 Address end() { return payload() + payloadSize(); } |
| 523 | 560 |
| 524 size_t objectPayloadSizeForTesting(); | 561 size_t objectPayloadSizeForTesting(); |
| 525 void clearLiveAndMarkDead(); | 562 void makeUnmarkedObjectsDead(); |
| 526 void sweep(ThreadHeap<Header>*); | 563 void sweep(ThreadHeap<Header>*); |
| 527 void clearObjectStartBitMap(); | 564 void clearObjectStartBitMap(); |
| 528 void finalize(Header*); | 565 void finalize(Header*); |
| 529 virtual void checkAndMarkPointer(Visitor*, Address) override; | 566 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 530 #if ENABLE(GC_PROFILE_MARKING) | 567 #if ENABLE(GC_PROFILE_MARKING) |
| 531 const GCInfo* findGCInfo(Address) override; | 568 const GCInfo* findGCInfo(Address) override; |
| 532 #endif | 569 #endif |
| 533 #if ENABLE(GC_PROFILE_HEAP) | 570 #if ENABLE(GC_PROFILE_HEAP) |
| 534 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 571 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 535 #endif | 572 #endif |
| (...skipping 23 matching lines...) |
| 559 void populateObjectStartBitMap(); | 596 void populateObjectStartBitMap(); |
| 560 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 597 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 561 TraceCallback traceCallback(Header*); | 598 TraceCallback traceCallback(Header*); |
| 562 bool hasVTable(Header*); | 599 bool hasVTable(Header*); |
| 563 | 600 |
| 564 intptr_t padding() const { return m_padding; } | 601 intptr_t padding() const { return m_padding; } |
| 565 | 602 |
| 566 HeapPage<Header>* m_next; | 603 HeapPage<Header>* m_next; |
| 567 intptr_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 604 intptr_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 568 bool m_objectStartBitMapComputed; | 605 bool m_objectStartBitMapComputed; |
| 569 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 606 uint8_t m_objectStartBitMap[reservedForObjectStartBitMap]; |
| 607 uint8_t m_objectMarkBitMap[reservedForObjectMarkBitMap]; |
| 570 | 608 |
| 571 friend class ThreadHeap<Header>; | 609 friend class ThreadHeap<Header>; |
| 572 }; | 610 }; |
| 573 | 611 |
| 574 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | 612 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary |
| 575 // pointer-sized word, and determining whether it cannot be interpreted | 613 // pointer-sized word, and determining whether it cannot be interpreted |
| 576 // as a pointer to an area that is managed by the garbage collected | 614 // as a pointer to an area that is managed by the garbage collected |
| 577 // Blink heap. This is a cache of 'pages' that have previously been | 615 // Blink heap. This is a cache of 'pages' that have previously been |
| 578 // determined to be wholly outside of the heap. The size of these pages must be | 616 // determined to be wholly outside of the heap. The size of these pages must be |
| 579 // smaller than the allocation alignment of the heap pages. We determine | 617 // smaller than the allocation alignment of the heap pages. We determine |
| (...skipping 107 matching lines...) |
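To make the idea above concrete: this is a negative cache, so a hit means the address is definitely not heap-managed, and a miss means "unknown, fall back to the slow check". A minimal sketch of a cache of that flavor (hypothetical names and sizes; the real class differs):

    #include <cstddef>
    #include <cstdint>

    class NotInHeapCache {
    public:
        NotInHeapCache()
        {
            for (size_t i = 0; i < kEntries; ++i)
                m_pages[i] = kEmpty;
        }

        // True only if this granule was previously recorded as outside the heap.
        bool lookup(uintptr_t addr) const { return m_pages[hash(addr)] == granule(addr); }
        void add(uintptr_t addr) { m_pages[hash(addr)] = granule(addr); }

    private:
        // The granule must be smaller than the heap pages' allocation
        // alignment so one granule can never straddle the heap boundary.
        static const uintptr_t kShift = 12;     // 4 KiB granules (assumption)
        static const size_t kEntries = 1 << 10; // direct-mapped table (assumption)
        static const uintptr_t kEmpty = ~static_cast<uintptr_t>(0);

        static uintptr_t granule(uintptr_t a) { return a >> kShift; }
        static size_t hash(uintptr_t a) { return (a >> kShift) & (kEntries - 1); }

        uintptr_t m_pages[kEntries];
    };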
| 687 #if ENABLE(GC_PROFILE_HEAP) | 725 #if ENABLE(GC_PROFILE_HEAP) |
| 688 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; | 726 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; |
| 689 #endif | 727 #endif |
| 690 | 728 |
| 691 // Sweep this part of the Blink heap. This finalizes dead objects | 729 // Sweep this part of the Blink heap. This finalizes dead objects |
| 692 // and builds freelists for all the unused memory. | 730 // and builds freelists for all the unused memory. |
| 693 virtual void sweep() = 0; | 731 virtual void sweep() = 0; |
| 694 virtual void postSweepProcessing() = 0; | 732 virtual void postSweepProcessing() = 0; |
| 695 | 733 |
| 696 virtual void clearFreeLists() = 0; | 734 virtual void clearFreeLists() = 0; |
| 697 virtual void clearLiveAndMarkDead() = 0; | 735 virtual void clearObjectMarkBitMaps() = 0; |
| 736 virtual void makeUnmarkedObjectsDead() = 0; |
| 698 | 737 |
| 699 virtual void makeConsistentForSweeping() = 0; | 738 virtual void makeConsistentForSweeping() = 0; |
| 700 #if ENABLE(ASSERT) | 739 #if ENABLE(ASSERT) |
| 701 virtual bool isConsistentForSweeping() = 0; | 740 virtual bool isConsistentForSweeping() = 0; |
| 702 #endif | 741 #endif |
| 703 virtual size_t objectPayloadSizeForTesting() = 0; | 742 virtual size_t objectPayloadSizeForTesting() = 0; |
| 704 | 743 |
| 705 virtual void updateRemainingAllocationSize() = 0; | 744 virtual void updateRemainingAllocationSize() = 0; |
| 706 | 745 |
| 707 virtual void prepareHeapForTermination() = 0; | 746 virtual void prepareHeapForTermination() = 0; |
| (...skipping 46 matching lines...) |
| 754 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) override; | 793 virtual const GCInfo* findGCInfoOfLargeHeapObject(Address) override; |
| 755 #endif | 794 #endif |
| 756 #if ENABLE(GC_PROFILE_HEAP) | 795 #if ENABLE(GC_PROFILE_HEAP) |
| 757 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | 796 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
| 758 #endif | 797 #endif |
| 759 | 798 |
| 760 virtual void sweep() override; | 799 virtual void sweep() override; |
| 761 virtual void postSweepProcessing() override; | 800 virtual void postSweepProcessing() override; |
| 762 | 801 |
| 763 virtual void clearFreeLists() override; | 802 virtual void clearFreeLists() override; |
| 764 virtual void clearLiveAndMarkDead() override; | 803 virtual void clearObjectMarkBitMaps() override; |
| 804 virtual void makeUnmarkedObjectsDead() override; |
| 765 | 805 |
| 766 virtual void makeConsistentForSweeping() override; | 806 virtual void makeConsistentForSweeping() override; |
| 767 #if ENABLE(ASSERT) | 807 #if ENABLE(ASSERT) |
| 768 virtual bool isConsistentForSweeping() override; | 808 virtual bool isConsistentForSweeping() override; |
| 769 #endif | 809 #endif |
| 770 virtual size_t objectPayloadSizeForTesting() override; | 810 virtual size_t objectPayloadSizeForTesting() override; |
| 771 | 811 |
| 772 virtual void updateRemainingAllocationSize() override; | 812 virtual void updateRemainingAllocationSize() override; |
| 773 | 813 |
| 774 ThreadState* threadState() { return m_threadState; } | 814 ThreadState* threadState() { return m_threadState; } |
| (...skipping 525 matching lines...) |
| 1300 size_t HeapObjectHeader::payloadSize() | 1340 size_t HeapObjectHeader::payloadSize() |
| 1301 { | 1341 { |
| 1302 return size() - objectHeaderSize; | 1342 return size() - objectHeaderSize; |
| 1303 } | 1343 } |
| 1304 | 1344 |
| 1305 Address HeapObjectHeader::payloadEnd() | 1345 Address HeapObjectHeader::payloadEnd() |
| 1306 { | 1346 { |
| 1307 return reinterpret_cast<Address>(this) + size(); | 1347 return reinterpret_cast<Address>(this) + size(); |
| 1308 } | 1348 } |
| 1309 | 1349 |
| 1310 NO_SANITIZE_ADDRESS | |
| 1311 void HeapObjectHeader::mark() | 1350 void HeapObjectHeader::mark() |
| 1312 { | 1351 { |
| 1313 checkHeader(); | 1352 checkHeader(); |
| 1314 m_size = m_size | markBitMask; | 1353 pageHeaderFromObject(this)->markObject(reinterpret_cast<Address>(this)); |
| 1315 } | 1354 } |
| 1316 | 1355 |
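mark() now delegates to the owning page's bitmap instead of setting a bit in m_size. pageHeaderFromObject is defined outside this hunk; a plausible shape, assuming blink pages are blinkPageSize-aligned with a system guard page in front (consistent with the assert in HeapPage::contains above):

    // Sketch only: the name mirrors the call above, but the body is an assumption.
    inline BaseHeapPage* pageHeaderFromObject(const void* object)
    {
        uintptr_t base = reinterpret_cast<uintptr_t>(object)
            & ~static_cast<uintptr_t>(blinkPageSize - 1);
        // The page header sits just past the unmapped guard page at the start.
        return reinterpret_cast<BaseHeapPage*>(base + WTF::kSystemPageSize);
    }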
| 1317 Address FinalizedHeapObjectHeader::payload() | 1356 Address FinalizedHeapObjectHeader::payload() |
| 1318 { | 1357 { |
| 1319 return reinterpret_cast<Address>(this) + finalizedHeaderSize; | 1358 return reinterpret_cast<Address>(this) + finalizedHeaderSize; |
| 1320 } | 1359 } |
| 1321 | 1360 |
| 1322 size_t FinalizedHeapObjectHeader::payloadSize() | 1361 size_t FinalizedHeapObjectHeader::payloadSize() |
| 1323 { | 1362 { |
| 1324 return size() - finalizedHeaderSize; | 1363 return size() - finalizedHeaderSize; |
| (...skipping 1030 matching lines...) |
| 2355 template<typename T, size_t inlineCapacity> | 2394 template<typename T, size_t inlineCapacity> |
| 2356 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { }; | 2395 struct GCInfoTrait<HeapVector<T, inlineCapacity> > : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator> > { }; |
| 2357 template<typename T, size_t inlineCapacity> | 2396 template<typename T, size_t inlineCapacity> |
| 2358 struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { }; | 2397 struct GCInfoTrait<HeapDeque<T, inlineCapacity> > : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator> > { }; |
| 2359 template<typename T, typename U, typename V> | 2398 template<typename T, typename U, typename V> |
| 2360 struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { }; | 2399 struct GCInfoTrait<HeapHashCountedSet<T, U, V> > : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator> > { }; |
| 2361 | 2400 |
| 2362 } // namespace blink | 2401 } // namespace blink |
| 2363 | 2402 |
| 2364 #endif // Heap_h | 2403 #endif // Heap_h |