| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 71 matching lines...) |
| 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
| 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
| 85 #else | 85 #else |
| 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
| 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
| 88 #endif | 88 #endif |
| 89 | 89 |
| 90 class CallbackStack; | 90 class CallbackStack; |
| 91 class PageMemory; | 91 class PageMemory; |
| 92 class ThreadHeapForHeapPage; |
| 92 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 93 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| 93 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
| 94 | 95 |
| 95 #if ENABLE(GC_PROFILE_HEAP) | 96 #if ENABLE(GC_PROFILE_HEAP) |
| 96 class TracedValue; | 97 class TracedValue; |
| 97 #endif | 98 #endif |
| 98 | 99 |
| 99 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout: | 100 // HeapObjectHeader is a 4-byte (32-bit) header with the following layout: |
| 100 // | 101 // |
| 101 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | 102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | |
| (...skipping 255 matching lines...) |
| 357 } | 358 } |
| 358 #endif | 359 #endif |
| 359 | 360 |
| 360 // FIXME: Add a good comment about the heap layout once heap relayout work | 361 // FIXME: Add a good comment about the heap layout once heap relayout work |
| 361 // is done. | 362 // is done. |
| 362 class BaseHeapPage { | 363 class BaseHeapPage { |
| 363 public: | 364 public: |
| 364 BaseHeapPage(PageMemory*, ThreadHeap*); | 365 BaseHeapPage(PageMemory*, ThreadHeap*); |
| 365 virtual ~BaseHeapPage() { } | 366 virtual ~BaseHeapPage() { } |
| 366 | 367 |
| 368 void link(BaseHeapPage** previousNext) |
| 369 { |
| 370 m_next = *previousNext; |
| 371 *previousNext = this; |
| 372 } |
| 373 void unlink(BaseHeapPage** previousNext) |
| 374 { |
| 375 *previousNext = m_next; |
| 376 m_next = nullptr; |
| 377 } |
| 378 BaseHeapPage* next() const { return m_next; } |
| 379 |
| 367 // Virtual methods are slow, so performance-sensitive methods | 380 // Virtual methods are slow, so performance-sensitive methods |
| 368 // should be defined as non-virtual methods on HeapPage and LargeObject. | 381 // should be defined as non-virtual methods on HeapPage and LargeObject. |
| 369 // The following methods are not performance-sensitive. | 382 // The following methods are not performance-sensitive. |
| 370 virtual size_t objectPayloadSizeForTesting() = 0; | 383 virtual size_t objectPayloadSizeForTesting() = 0; |
| 371 virtual bool isEmpty() = 0; | 384 virtual bool isEmpty() = 0; |
| 372 virtual void removeFromHeap(ThreadHeap*) = 0; | 385 virtual void removeFromHeap() = 0; |
| 373 virtual void sweep() = 0; | 386 virtual void sweep() = 0; |
| 374 virtual void markUnmarkedObjectsDead() = 0; | 387 virtual void markUnmarkedObjectsDead() = 0; |
| 375 // Check if the given address points to an object in this | 388 // Check if the given address points to an object in this |
| 376 // heap page. If so, find the start of that object and mark it | 389 // heap page. If so, find the start of that object and mark it |
| 377 // using the given Visitor. Otherwise do nothing. The pointer must | 390 // using the given Visitor. Otherwise do nothing. The pointer must |
| 378 // be within the same aligned blinkPageSize as the this-pointer. | 391 // be within the same aligned blinkPageSize as the this-pointer. |
| 379 // | 392 // |
| 380 // This is used during conservative stack scanning to | 393 // This is used during conservative stack scanning to |
| 381 // conservatively mark all objects that could be referenced from | 394 // conservatively mark all objects that could be referenced from |
| 382 // the stack. | 395 // the stack. |
| (...skipping 29 matching lines...) |
| 412 | 425 |
| 413 void markAsUnswept() | 426 void markAsUnswept() |
| 414 { | 427 { |
| 415 ASSERT(m_swept); | 428 ASSERT(m_swept); |
| 416 m_swept = false; | 429 m_swept = false; |
| 417 } | 430 } |
| 418 | 431 |
| 419 private: | 432 private: |
| 420 PageMemory* m_storage; | 433 PageMemory* m_storage; |
| 421 ThreadHeap* m_heap; | 434 ThreadHeap* m_heap; |
| 435 BaseHeapPage* m_next; |
| 422 // Whether the page is part of a terminating thread or not. | 436 // Whether the page is part of a terminating thread or not. |
| 423 bool m_terminating; | 437 bool m_terminating; |
| 424 | 438 |
| 425 // Track the sweeping state of a page. Set to true once | 439 // Track the sweeping state of a page. Set to true once |
| 426 // lazy sweeping has processed it. | 440 // lazy sweeping has processed it. |
| 427 // | 441 // |
| 428 // Set to false at the start of a sweep, true upon completion | 442 // Set to false at the start of a sweep, true upon completion |
| 429 // of lazy sweeping. | 443 // of lazy sweeping. |
| 430 bool m_swept; | 444 bool m_swept; |
| 445 friend class ThreadHeap; |
| 431 }; | 446 }; |
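The new link()/unlink() take a pointer to the previous element's m_next slot (or to the list head), so a caller can splice a page in or out of the singly linked page list in O(1) without a doubly linked list or a separate "previous" pointer. A self-contained sketch of the idiom with a hypothetical Node type:

    struct Node {
        Node* m_next = nullptr;
        void link(Node** previousNext) { m_next = *previousNext; *previousNext = this; }
        void unlink(Node** previousNext) { *previousNext = m_next; m_next = nullptr; }
    };

    // Sweep-style traversal: the Node** cursor always points at the slot that
    // holds the current node, so removal is a single pointer write.
    void removeIf(Node** head, bool (*shouldRemove)(Node*))
    {
        for (Node** previousNext = head; *previousNext;) {
            Node* node = *previousNext;
            if (shouldRemove(node))
                node->unlink(previousNext); // *previousNext now skips node.
            else
                previousNext = &node->m_next;
        }
    }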
| 432 | 447 |
| 433 class HeapPage final : public BaseHeapPage { | 448 class HeapPage final : public BaseHeapPage { |
| 434 public: | 449 public: |
| 435 HeapPage(PageMemory*, ThreadHeap*); | 450 HeapPage(PageMemory*, ThreadHeap*); |
| 436 | 451 |
| 437 Address payload() | 452 Address payload() |
| 438 { | 453 { |
| 439 return address() + sizeof(HeapPage) + headerPadding(); | 454 return address() + sizeof(HeapPage) + headerPadding(); |
| 440 } | 455 } |
| 441 size_t payloadSize() | 456 size_t payloadSize() |
| 442 { | 457 { |
| 443 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; | 458 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; |
| 444 } | 459 } |
| 445 Address payloadEnd() { return payload() + payloadSize(); } | 460 Address payloadEnd() { return payload() + payloadSize(); } |
| 446 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 461 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 447 | 462 |
| 448 void link(HeapPage** previousNext) | |
| 449 { | |
| 450 m_next = *previousNext; | |
| 451 *previousNext = this; | |
| 452 } | |
| 453 | |
| 454 void unlink(HeapPage** previousNext) | |
| 455 { | |
| 456 *previousNext = m_next; | |
| 457 m_next = nullptr; | |
| 458 } | |
| 459 | |
| 460 virtual size_t objectPayloadSizeForTesting() override; | 463 virtual size_t objectPayloadSizeForTesting() override; |
| 461 virtual bool isEmpty() override; | 464 virtual bool isEmpty() override; |
| 462 virtual void removeFromHeap(ThreadHeap*) override; | 465 virtual void removeFromHeap() override; |
| 463 virtual void sweep() override; | 466 virtual void sweep() override; |
| 464 virtual void markUnmarkedObjectsDead() override; | 467 virtual void markUnmarkedObjectsDead() override; |
| 465 virtual void checkAndMarkPointer(Visitor*, Address) override; | 468 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 466 virtual void markOrphaned() override | 469 virtual void markOrphaned() override |
| 467 { | 470 { |
| 468 // Zap the payload with a recognizable value to detect any incorrect | 471 // Zap the payload with a recognizable value to detect any incorrect |
| 469 // cross-thread pointer usage. | 472 // cross-thread pointer usage. |
| 470 #if defined(ADDRESS_SANITIZER) | 473 #if defined(ADDRESS_SANITIZER) |
| 471 // This needs to zap poisoned memory as well. | 474 // This needs to zap poisoned memory as well. |
| 472 // Force unpoison memory before memset. | 475 // Force unpoison memory before memset. |
| (...skipping 14 matching lines...) |
| 487 // the result can be used to populate the negative page cache. | 490 // the result can be used to populate the negative page cache. |
| 488 virtual bool contains(Address addr) override | 491 virtual bool contains(Address addr) override |
| 489 { | 492 { |
| 490 Address blinkPageStart = roundToBlinkPageStart(address()); | 493 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 491 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at an aligned address plus the guard page size. | 494 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at an aligned address plus the guard page size. |
| 492 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 495 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 493 } | 496 } |
| 494 #endif | 497 #endif |
| 495 virtual size_t size() override { return blinkPageSize; } | 498 virtual size_t size() override { return blinkPageSize; } |
| 496 | 499 |
| 497 HeapPage* next() { return m_next; } | 500 ThreadHeapForHeapPage* heapForHeapPage(); |
| 498 | |
| 499 void clearObjectStartBitMap(); | 501 void clearObjectStartBitMap(); |
| 500 | 502 |
| 501 #if defined(ADDRESS_SANITIZER) | 503 #if defined(ADDRESS_SANITIZER) |
| 502 void poisonUnmarkedObjects(); | 504 void poisonUnmarkedObjects(); |
| 503 #endif | 505 #endif |
| 504 | 506 |
| 505 // This method exists just to prevent compilers from removing m_padding. | 507 // This method exists just to prevent compilers from removing m_padding. |
| 506 uint64_t unusedMethod() const { return m_padding; } | 508 uint64_t unusedMethod() const { return m_padding; } |
| 507 | 509 |
| 508 private: | 510 private: |
| 509 HeapObjectHeader* findHeaderFromAddress(Address); | 511 HeapObjectHeader* findHeaderFromAddress(Address); |
| 510 void populateObjectStartBitMap(); | 512 void populateObjectStartBitMap(); |
| 511 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 513 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 512 | 514 |
| 513 HeapPage* m_next; | |
| 514 bool m_objectStartBitMapComputed; | 515 bool m_objectStartBitMapComputed; |
| 515 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 516 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 516 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 517 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 517 | |
| 518 friend class ThreadHeap; | |
| 519 }; | 518 }; |
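findHeaderFromAddress() and populateObjectStartBitMap() above support conservative scanning with an object-start bitmap: one bit per allocation granule records where an object begins, so an interior pointer can be walked back to its object's header. A simplified, hypothetical version of the lookup (a real implementation would scan byte-wise for speed):

    #include <cstddef>
    #include <cstdint>

    const size_t allocationGranularity = 8; // assumed granule size

    // Map an interior address within the payload back to the start of the
    // object containing it by scanning the bitmap downwards for a set bit.
    uint8_t* findObjectStart(uint8_t* payload, const uint8_t* bitmap, uint8_t* addr)
    {
        size_t index = static_cast<size_t>(addr - payload) / allocationGranularity;
        for (;; --index) {
            if (bitmap[index / 8] & (1u << (index % 8)))
                return payload + index * allocationGranularity;
            if (!index)
                return nullptr; // no object starts at or below addr
        }
    }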
| 520 | 519 |
| 521 // Large allocations are allocated as separate objects and linked in a list. | 520 // Large allocations are allocated as separate objects and linked in a list. |
| 522 // | 521 // |
| 523 // In order to use the same memory allocation routines for everything allocated | 522 // In order to use the same memory allocation routines for everything allocated |
| 524 // in the heap, large objects are considered heap pages containing only one | 523 // in the heap, large objects are considered heap pages containing only one |
| 525 // object. | 524 // object. |
| 526 class LargeObject final : public BaseHeapPage { | 525 class LargeObject final : public BaseHeapPage { |
| 527 public: | 526 public: |
| 528 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) | 527 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) |
| 529 : BaseHeapPage(storage, heap) | 528 : BaseHeapPage(storage, heap) |
| 530 , m_payloadSize(payloadSize) | 529 , m_payloadSize(payloadSize) |
| 531 { | 530 { |
| 532 } | 531 } |
| 533 | 532 |
| 534 Address payload() { return heapObjectHeader()->payload(); } | 533 Address payload() { return heapObjectHeader()->payload(); } |
| 535 size_t payloadSize() { return m_payloadSize; } | 534 size_t payloadSize() { return m_payloadSize; } |
| 536 Address payloadEnd() { return payload() + payloadSize(); } | 535 Address payloadEnd() { return payload() + payloadSize(); } |
| 537 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 536 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 538 | 537 |
| 539 virtual size_t objectPayloadSizeForTesting() override; | 538 virtual size_t objectPayloadSizeForTesting() override; |
| 540 virtual bool isEmpty() override; | 539 virtual bool isEmpty() override; |
| 541 virtual void removeFromHeap(ThreadHeap*) override; | 540 virtual void removeFromHeap() override; |
| 542 virtual void sweep() override; | 541 virtual void sweep() override; |
| 543 virtual void markUnmarkedObjectsDead() override; | 542 virtual void markUnmarkedObjectsDead() override; |
| 544 virtual void checkAndMarkPointer(Visitor*, Address) override; | 543 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 545 virtual void markOrphaned() override | 544 virtual void markOrphaned() override |
| 546 { | 545 { |
| 547 // Zap the payload with a recognizable value to detect any incorrect | 546 // Zap the payload with a recognizable value to detect any incorrect |
| 548 // cross-thread pointer usage. | 547 // cross-thread pointer usage. |
| 549 memset(payload(), orphanedZapValue, payloadSize()); | 548 memset(payload(), orphanedZapValue, payloadSize()); |
| 550 BaseHeapPage::markOrphaned(); | 549 BaseHeapPage::markOrphaned(); |
| 551 } | 550 } |
| (...skipping 11 matching lines...) |
| 563 { | 562 { |
| 564 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 563 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| 565 } | 564 } |
| 566 #endif | 565 #endif |
| 567 virtual size_t size() | 566 virtual size_t size() |
| 568 { | 567 { |
| 569 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; | 568 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; |
| 570 } | 569 } |
| 571 virtual bool isLargeObject() override { return true; } | 570 virtual bool isLargeObject() override { return true; } |
| 572 | 571 |
| 573 void link(LargeObject** previousNext) | |
| 574 { | |
| 575 m_next = *previousNext; | |
| 576 *previousNext = this; | |
| 577 } | |
| 578 | |
| 579 void unlink(LargeObject** previousNext) | |
| 580 { | |
| 581 *previousNext = m_next; | |
| 582 m_next = nullptr; | |
| 583 } | |
| 584 | |
| 585 LargeObject* next() | |
| 586 { | |
| 587 return m_next; | |
| 588 } | |
| 589 | |
| 590 HeapObjectHeader* heapObjectHeader() | 572 HeapObjectHeader* heapObjectHeader() |
| 591 { | 573 { |
| 592 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); | 574 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); |
| 593 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | 575 return reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 594 } | 576 } |
| 595 | 577 |
| 596 // This method exists just to prevent compilers from removing m_padding. | 578 // This method exists just to prevent compilers from removing m_padding. |
| 597 uint64_t unusedMethod() const { return m_padding; } | 579 uint64_t unusedMethod() const { return m_padding; } |
| 598 | 580 |
| 599 private: | 581 private: |
| 600 friend class ThreadHeap; | |
| 601 LargeObject* m_next; | |
| 602 size_t m_payloadSize; | 582 size_t m_payloadSize; |
| 603 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | 583 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 604 }; | 584 }; |
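Per size() and heapObjectHeader() above, a large object's footprint is one contiguous block; schematically (offsets depend on headerPadding()):

    // address()
    // |  LargeObject fields  |  headerPadding()  |  HeapObjectHeader  |  payload (m_payloadSize)  |
    //                                            ^ heapObjectHeader() ^ payload()                 ^ payloadEnd()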
| 605 | 585 |
| 606 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | 586 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary |
| 607 // pointer-sized word, and determining whether it cannot be interpreted as a | 587 // pointer-sized word, and determining whether it cannot be interpreted as a |
| 608 // pointer to an area that is managed by the garbage collected Blink heap. This | 588 // pointer to an area that is managed by the garbage collected Blink heap. This |
| 609 // is a cache of 'pages' that have previously been determined to be wholly | 589 // is a cache of 'pages' that have previously been determined to be wholly |
| 610 // outside of the heap. The size of these pages must be smaller than the | 590 // outside of the heap. The size of these pages must be smaller than the |
| 611 // allocation alignment of the heap pages. We determine off-heap-ness by | 591 // allocation alignment of the heap pages. We determine off-heap-ness by |
| (...skipping 99 matching lines...) |
| 711 // Returns a bucket number for inserting a FreeListEntry of a given size. | 691 // Returns a bucket number for inserting a FreeListEntry of a given size. |
| 712 // All FreeListEntries in the given bucket, n, have size >= 2^n. | 692 // All FreeListEntries in the given bucket, n, have size >= 2^n. |
| 713 static int bucketIndexForSize(size_t); | 693 static int bucketIndexForSize(size_t); |
| 714 | 694 |
| 715 private: | 695 private: |
| 716 int m_biggestFreeListIndex; | 696 int m_biggestFreeListIndex; |
| 717 | 697 |
| 718 // All FreeListEntries in the nth list have size >= 2^n. | 698 // All FreeListEntries in the nth list have size >= 2^n. |
| 719 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 699 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 720 | 700 |
| 721 friend class ThreadHeap; | 701 friend class ThreadHeapForHeapPage; |
| 722 }; | 702 }; |
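A bucketIndexForSize() that satisfies the documented invariant (every entry in bucket n has size >= 2^n) is the index of the highest set bit, i.e. floor(log2(size)). An illustrative sketch, assuming size > 0:

    int bucketIndexForSize(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // floor(log2(size))
    }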
| 723 | 703 |
| 724 // Thread heaps represent a part of the per-thread Blink heap. | 704 // Thread heaps represent a part of the per-thread Blink heap. |
| 725 // | 705 // |
| 726 // Each Blink thread has a number of thread heaps: one general heap | 706 // Each Blink thread has a number of thread heaps: one general heap |
| 727 // that contains any type of object and a number of heaps specialized | 707 // that contains any type of object and a number of heaps specialized |
| 728 // for specific object types (such as Node). | 708 // for specific object types (such as Node). |
| 729 // | 709 // |
| 730 // Each thread heap contains the functionality to allocate new objects | 710 // Each thread heap contains the functionality to allocate new objects |
| 731 // (potentially adding new pages to the heap), to find and mark | 711 // (potentially adding new pages to the heap), to find and mark |
| 732 // objects during conservative stack scanning and to sweep the set of | 712 // objects during conservative stack scanning and to sweep the set of |
| 733 // pages after a GC. | 713 // pages after a GC. |
| 734 class PLATFORM_EXPORT ThreadHeap final { | 714 class PLATFORM_EXPORT ThreadHeap { |
| 735 public: | 715 public: |
| 736 ThreadHeap(ThreadState*, int); | 716 ThreadHeap(ThreadState*, int); |
| 737 ~ThreadHeap(); | 717 virtual ~ThreadHeap(); |
| 738 void cleanupPages(); | 718 void cleanupPages(); |
| 739 | 719 |
| 740 #if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING) | 720 #if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING) |
| 741 BaseHeapPage* findPageFromAddress(Address); | 721 BaseHeapPage* findPageFromAddress(Address); |
| 742 #endif | 722 #endif |
| 743 #if ENABLE(GC_PROFILE_HEAP) | 723 #if ENABLE(GC_PROFILE_HEAP) |
| 744 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 724 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 745 #endif | 725 #endif |
| 746 | 726 |
| 747 void clearFreeLists(); | 727 virtual void clearFreeLists() { } |
| 748 void makeConsistentForSweeping(); | 728 void makeConsistentForSweeping(); |
| 749 #if ENABLE(ASSERT) | 729 #if ENABLE(ASSERT) |
| 750 bool isConsistentForSweeping(); | 730 virtual bool isConsistentForSweeping() { return true; } |
| 751 #endif | 731 #endif |
| 752 size_t objectPayloadSizeForTesting(); | 732 size_t objectPayloadSizeForTesting(); |
| 733 void prepareHeapForTermination(); |
| 734 void prepareForSweep(); |
| 735 Address lazySweep(size_t, size_t gcInfoIndex); |
| 736 void completeSweep(); |
| 753 | 737 |
| 754 ThreadState* threadState() { return m_threadState; } | 738 ThreadState* threadState() { return m_threadState; } |
| 739 int heapIndex() const { return m_index; } |
| 740 inline static size_t allocationSizeFromSize(size_t); |
| 741 inline static size_t roundedAllocationSize(size_t size) |
| 742 { |
| 743 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 744 } |
| 755 | 745 |
| 746 protected: |
| 747 BaseHeapPage* m_firstPage; |
| 748 BaseHeapPage* m_firstUnsweptPage; |
| 749 |
| 750 private: |
| 751 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; |
| 752 |
| 753 ThreadState* m_threadState; |
| 754 |
| 755 // Index into the page pools. This is used to ensure that the pages of the |
| 756 // same type go into the correct page pool and thus avoid type confusion. |
| 757 int m_index; |
| 758 }; |
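The split introduced here is a template-method refactoring: ThreadHeap keeps the shared state (page lists, thread state, heap index) and the sweeping entry points, while the page-kind-specific step is the virtual lazySweepPages(). A standalone sketch of that shape, with hypothetical names rather than the CL's code:

    class HeapBase {
    public:
        virtual ~HeapBase() { }
        // Shared driver: called when allocation fails and lazy sweeping
        // might free enough space to satisfy the request.
        char* lazySweep(size_t allocationSize, size_t gcInfoIndex)
        {
            return lazySweepPages(allocationSize, gcInfoIndex);
        }
    private:
        // Each subclass sweeps its own page kind.
        virtual char* lazySweepPages(size_t allocationSize, size_t gcInfoIndex) = 0;
    };

    class PageHeap final : public HeapBase {
        char* lazySweepPages(size_t, size_t) override { /* sweep normal pages */ return nullptr; }
    };

    class LargeObjectHeap final : public HeapBase {
        char* lazySweepPages(size_t, size_t) override { /* sweep large objects */ return nullptr; }
    };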
| 759 |
| 760 class ThreadHeapForHeapPage final : public ThreadHeap { |
| 761 public: |
| 762 ThreadHeapForHeapPage(ThreadState*, int); |
| 756 void addToFreeList(Address address, size_t size) | 763 void addToFreeList(Address address, size_t size) |
| 757 { | 764 { |
| 758 ASSERT(findPageFromAddress(address)); | 765 ASSERT(findPageFromAddress(address)); |
| 759 ASSERT(findPageFromAddress(address + size - 1)); | 766 ASSERT(findPageFromAddress(address + size - 1)); |
| 760 m_freeList.addToFreeList(address, size); | 767 m_freeList.addToFreeList(address, size); |
| 761 } | 768 } |
| 769 virtual void clearFreeLists() override; |
| 770 #if ENABLE(ASSERT) |
| 771 virtual bool isConsistentForSweeping() override; |
| 772 bool pagesToBeSweptContains(Address); |
| 773 #endif |
| 762 | 774 |
| 763 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); | 775 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); |
| 764 inline static size_t roundedAllocationSize(size_t size) | 776 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 765 { | |
| 766 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | |
| 767 } | |
| 768 inline static size_t allocationSizeFromSize(size_t); | |
| 769 | |
| 770 void prepareHeapForTermination(); | |
| 771 void prepareForSweep(); | |
| 772 void completeSweep(); | |
| 773 | 777 |
| 774 void freePage(HeapPage*); | 778 void freePage(HeapPage*); |
| 775 void freeLargeObject(LargeObject*); | |
| 776 | 779 |
| 780 bool coalesce(); |
| 777 void promptlyFreeObject(HeapObjectHeader*); | 781 void promptlyFreeObject(HeapObjectHeader*); |
| 778 bool expandObject(HeapObjectHeader*, size_t); | 782 bool expandObject(HeapObjectHeader*, size_t); |
| 779 void shrinkObject(HeapObjectHeader*, size_t); | 783 void shrinkObject(HeapObjectHeader*, size_t); |
| 780 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 784 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
| 781 | 785 |
| 782 private: | 786 private: |
| 787 void allocatePage(); |
| 788 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 783 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 789 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
| 784 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 790 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 785 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 791 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 786 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 792 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 787 inline void setAllocationPoint(Address, size_t); | 793 inline void setAllocationPoint(Address, size_t); |
| 788 void updateRemainingAllocationSize(); | 794 void updateRemainingAllocationSize(); |
| 789 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 795 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 790 Address lazySweepPages(size_t, size_t gcInfoIndex); | |
| 791 bool lazySweepLargeObjects(size_t); | |
| 792 | 796 |
| 793 void allocatePage(); | 797 FreeList m_freeList; |
| 794 Address allocateLargeObject(size_t, size_t gcInfoIndex); | |
| 795 | |
| 796 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | |
| 797 | |
| 798 #if ENABLE(ASSERT) | |
| 799 bool pagesToBeSweptContains(Address); | |
| 800 #endif | |
| 801 | |
| 802 bool coalesce(); | |
| 803 void preparePagesForSweeping(); | |
| 804 | |
| 805 Address m_currentAllocationPoint; | 798 Address m_currentAllocationPoint; |
| 806 size_t m_remainingAllocationSize; | 799 size_t m_remainingAllocationSize; |
| 807 size_t m_lastRemainingAllocationSize; | 800 size_t m_lastRemainingAllocationSize; |
| 808 | 801 |
| 809 HeapPage* m_firstPage; | |
| 810 LargeObject* m_firstLargeObject; | |
| 811 HeapPage* m_firstUnsweptPage; | |
| 812 LargeObject* m_firstUnsweptLargeObject; | |
| 813 | |
| 814 ThreadState* m_threadState; | |
| 815 | |
| 816 FreeList m_freeList; | |
| 817 | |
| 818 // Index into the page pools. This is used to ensure that the pages of the | |
| 819 // same type go into the correct page pool and thus avoid type confusion. | |
| 820 int m_index; | |
| 821 | |
| 822 // The size of promptly freed objects in the heap. | 802 // The size of promptly freed objects in the heap. |
| 823 size_t m_promptlyFreedSize; | 803 size_t m_promptlyFreedSize; |
| 824 }; | 804 }; |
| 825 | 805 |
| 806 class ThreadHeapForLargeObject final : public ThreadHeap { |
| 807 public: |
| 808 ThreadHeapForLargeObject(ThreadState*, int); |
| 809 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 810 void freeLargeObject(LargeObject*); |
| 811 private: |
| 812 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); |
| 813 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 814 }; |
| 815 |
| 826 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 816 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
| 827 // pages are aligned at blinkPageBase plus an OS page size. | 817 // pages are aligned at blinkPageBase plus an OS page size. |
| 828 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 818 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
| 829 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | 819 // typed heaps. This is only exported to enable tests in HeapTest.cpp. |
| 830 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | 820 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) |
| 831 { | 821 { |
| 832 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 822 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| 833 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | 823 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); |
| 834 ASSERT(page->contains(address)); | 824 ASSERT(page->contains(address)); |
| 835 return page; | 825 return page; |
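A worked example of the masking above, with illustrative values (blinkPageSize = 2^17, guard/system page = 0x1000):

    // object                   = 0x502345678
    // blinkPageAddress(object) = 0x502345678 & ~0x1FFFF = 0x502340000
    // page                     = 0x502340000 + 0x1000   = 0x502341000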
| (...skipping 494 matching lines...) |
| 1330 // therefore has to happen before any calculation on the size. | 1320 // therefore has to happen before any calculation on the size. |
| 1331 RELEASE_ASSERT(size < maxHeapObjectSize); | 1321 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 1332 | 1322 |
| 1333 // Add space for header. | 1323 // Add space for header. |
| 1334 size_t allocationSize = size + sizeof(HeapObjectHeader); | 1324 size_t allocationSize = size + sizeof(HeapObjectHeader); |
| 1335 // Align size with allocation granularity. | 1325 // Align size with allocation granularity. |
| 1336 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1326 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1337 return allocationSize; | 1327 return allocationSize; |
| 1338 } | 1328 } |
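For example, assuming the 4-byte HeapObjectHeader described above and an 8-byte allocation granularity (allocationMask = 7), a 17-byte request works out as:

    // size                 = 17
    // + sizeof(header)     = 17 + 4 = 21
    // align up to 8 bytes  = (21 + 7) & ~7 = 24      (allocationSizeFromSize)
    // usable payload       = 24 - 4 = 20             (roundedAllocationSize)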
| 1339 | 1329 |
| 1340 Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1330 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
| 1341 { | 1331 { |
| 1342 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1332 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1343 Address headerAddress = m_currentAllocationPoint; | 1333 Address headerAddress = m_currentAllocationPoint; |
| 1344 m_currentAllocationPoint += allocationSize; | 1334 m_currentAllocationPoint += allocationSize; |
| 1345 m_remainingAllocationSize -= allocationSize; | 1335 m_remainingAllocationSize -= allocationSize; |
| 1346 ASSERT(gcInfoIndex > 0); | 1336 ASSERT(gcInfoIndex > 0); |
| 1347 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1337 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
| 1348 Address result = headerAddress + sizeof(HeapObjectHeader); | 1338 Address result = headerAddress + sizeof(HeapObjectHeader); |
| 1349 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1339 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1350 | 1340 |
| 1351 // Unpoison the memory used for the object (payload). | 1341 // Unpoison the memory used for the object (payload). |
| 1352 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1342 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1353 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); | 1343 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1354 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 1344 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
| 1355 return result; | 1345 return result; |
| 1356 } | 1346 } |
| 1357 return outOfLineAllocate(allocationSize, gcInfoIndex); | 1347 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 1358 } | 1348 } |
| 1359 | 1349 |
| 1360 Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex) | 1350 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) |
| 1361 { | 1351 { |
| 1362 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1352 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
| 1363 } | 1353 } |
| 1364 | 1354 |
| 1365 template<typename T> | 1355 template<typename T> |
| 1366 struct HeapIndexTrait { | 1356 struct HeapIndexTrait { |
| 1367 static int index() { return GeneralHeap; }; | 1357 static int index() { return GeneralHeap; }; |
| 1368 }; | 1358 }; |
| 1369 | 1359 |
| 1370 // FIXME: The forward declaration is layering violation. | 1360 // FIXME: The forward declaration is layering violation. |
| 1371 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 1361 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
| 1372 class Type; \ | 1362 class Type; \ |
| 1373 template<> \ | 1363 template<> \ |
| 1374 struct HeapIndexTrait<class Type> { \ | 1364 struct HeapIndexTrait<class Type> { \ |
| 1375 static int index() { return Type##Heap; }; \ | 1365 static int index() { return Type##Heap; }; \ |
| 1376 }; | 1366 }; |
| 1377 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 1367 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
| 1378 #undef DEFINE_TYPED_HEAP_TRAIT | 1368 #undef DEFINE_TYPED_HEAP_TRAIT |
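For reference, DEFINE_TYPED_HEAP_TRAIT(Node) expands to (modulo whitespace):

    class Node;
    template<>
    struct HeapIndexTrait<class Node> {
        static int index() { return NodeHeap; };
    };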
| 1379 | 1369 |
| 1380 template<typename T> | 1370 template<typename T> |
| 1381 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) | 1371 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) |
| 1382 { | 1372 { |
| 1383 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1373 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1384 ASSERT(state->isAllocationAllowed()); | 1374 ASSERT(state->isAllocationAllowed()); |
| 1385 return state->heap(heapIndex)->allocate(size, gcInfoIndex); | 1375 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); |
| 1386 } | 1376 } |
| 1387 | 1377 |
| 1388 template<typename T> | 1378 template<typename T> |
| 1389 Address Heap::allocate(size_t size) | 1379 Address Heap::allocate(size_t size) |
| 1390 { | 1380 { |
| 1391 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); | 1381 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); |
| 1392 } | 1382 } |
| 1393 | 1383 |
| 1394 template<typename T> | 1384 template<typename T> |
| 1395 Address Heap::reallocate(void* previous, size_t size) | 1385 Address Heap::reallocate(void* previous, size_t size) |
| (...skipping 1033 matching lines...) |
| 2429 template<typename T, size_t inlineCapacity> | 2419 template<typename T, size_t inlineCapacity> |
| 2430 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2420 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
| 2431 template<typename T, size_t inlineCapacity> | 2421 template<typename T, size_t inlineCapacity> |
| 2432 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2422 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
| 2433 template<typename T, typename U, typename V> | 2423 template<typename T, typename U, typename V> |
| 2434 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2424 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
| 2435 | 2425 |
| 2436 } // namespace blink | 2426 } // namespace blink |
| 2437 | 2427 |
| 2438 #endif // Heap_h | 2428 #endif // Heap_h |