Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 638 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 649 public: | 649 public: |
| 650 FreeList(); | 650 FreeList(); |
| 651 | 651 |
| 652 void addToFreeList(Address, size_t); | 652 void addToFreeList(Address, size_t); |
| 653 void clear(); | 653 void clear(); |
| 654 | 654 |
| 655 // Returns a bucket number for inserting a FreeListEntry of a given size. | 655 // Returns a bucket number for inserting a FreeListEntry of a given size. |
| 656 // All FreeListEntries in the given bucket, n, have size >= 2^n. | 656 // All FreeListEntries in the given bucket, n, have size >= 2^n. |
| 657 static int bucketIndexForSize(size_t); | 657 static int bucketIndexForSize(size_t); |
| 658 | 658 |
| 659 void takeSnapshot(const String& dumpBaseName); | |
| 659 #if ENABLE(GC_PROFILING) | 660 #if ENABLE(GC_PROFILING) |
| 660 struct PerBucketFreeListStats { | 661 struct PerBucketFreeListStats { |
| 661 size_t entryCount; | 662 size_t entryCount; |
| 662 size_t freeSize; | 663 size_t freeSize; |
| 663 | 664 |
| 664 PerBucketFreeListStats() : entryCount(0), freeSize(0) { } | 665 PerBucketFreeListStats() : entryCount(0), freeSize(0) { } |
| 665 }; | 666 }; |
| 666 | 667 |
| 667 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | 668 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; |
| 668 #endif | 669 #endif |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 686 class PLATFORM_EXPORT BaseHeap { | 687 class PLATFORM_EXPORT BaseHeap { |
| 687 public: | 688 public: |
| 688 BaseHeap(ThreadState*, int); | 689 BaseHeap(ThreadState*, int); |
| 689 virtual ~BaseHeap(); | 690 virtual ~BaseHeap(); |
| 690 void cleanupPages(); | 691 void cleanupPages(); |
| 691 | 692 |
| 692 void takeSnapshot(const String& dumpBaseName); | 693 void takeSnapshot(const String& dumpBaseName); |
| 693 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 694 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 694 BasePage* findPageFromAddress(Address); | 695 BasePage* findPageFromAddress(Address); |
| 695 #endif | 696 #endif |
| 697 virtual void takeFreelistSnapshot(String dumpBaseName) { }; | |
| 696 #if ENABLE(GC_PROFILING) | 698 #if ENABLE(GC_PROFILING) |
| 697 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 699 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 698 virtual void snapshotFreeList(TracedValue&) { }; | 700 virtual void snapshotFreeList(TracedValue&) { }; |
| 699 | 701 |
| 700 void countMarkedObjects(ClassAgeCountsMap&) const; | 702 void countMarkedObjects(ClassAgeCountsMap&) const; |
| 701 void countObjectsToSweep(ClassAgeCountsMap&) const; | 703 void countObjectsToSweep(ClassAgeCountsMap&) const; |
| 702 void incrementMarkedObjectsAge(); | 704 void incrementMarkedObjectsAge(); |
| 703 #endif | 705 #endif |
| 704 | 706 |
| 705 virtual void clearFreeLists() { } | 707 virtual void clearFreeLists() { } |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 745 { | 747 { |
| 746 ASSERT(findPageFromAddress(address)); | 748 ASSERT(findPageFromAddress(address)); |
| 747 ASSERT(findPageFromAddress(address + size - 1)); | 749 ASSERT(findPageFromAddress(address + size - 1)); |
| 748 m_freeList.addToFreeList(address, size); | 750 m_freeList.addToFreeList(address, size); |
| 749 } | 751 } |
| 750 virtual void clearFreeLists() override; | 752 virtual void clearFreeLists() override; |
| 751 #if ENABLE(ASSERT) | 753 #if ENABLE(ASSERT) |
| 752 virtual bool isConsistentForGC() override; | 754 virtual bool isConsistentForGC() override; |
| 753 bool pagesToBeSweptContains(Address); | 755 bool pagesToBeSweptContains(Address); |
| 754 #endif | 756 #endif |
| 757 void takeFreelistSnapshot(String dumpBaseName) override; | |
| 755 #if ENABLE(GC_PROFILING) | 758 #if ENABLE(GC_PROFILING) |
| 756 void snapshotFreeList(TracedValue&) override; | 759 void snapshotFreeList(TracedValue&) override; |
| 757 #endif | 760 #endif |
| 758 | 761 |
| 759 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 762 Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 760 | 763 |
| 761 void freePage(NormalPage*); | 764 void freePage(NormalPage*); |
| 762 | 765 |
| 763 bool coalesce(); | 766 bool coalesce(); |
| 764 void promptlyFreeObject(HeapObjectHeader*); | 767 void promptlyFreeObject(HeapObjectHeader*); |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 778 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 781 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 779 | 782 |
| 780 FreeList m_freeList; | 783 FreeList m_freeList; |
| 781 Address m_currentAllocationPoint; | 784 Address m_currentAllocationPoint; |
| 782 size_t m_remainingAllocationSize; | 785 size_t m_remainingAllocationSize; |
| 783 size_t m_lastRemainingAllocationSize; | 786 size_t m_lastRemainingAllocationSize; |
| 784 | 787 |
| 785 // The size of promptly freed objects in the heap. | 788 // The size of promptly freed objects in the heap. |
| 786 size_t m_promptlyFreedSize; | 789 size_t m_promptlyFreedSize; |
| 787 | 790 |
| 788 #if ENABLE(GC_PROFILING) | |
| 789 size_t m_cumulativeAllocationSize; | 791 size_t m_cumulativeAllocationSize; |
| 790 size_t m_allocationCount; | 792 size_t m_allocationCount; |
| 791 size_t m_inlineAllocationCount; | 793 size_t m_inlineAllocationCount; |
| 792 #endif | |
| 793 }; | 794 }; |
| 794 | 795 |
| 795 class LargeObjectHeap final : public BaseHeap { | 796 class LargeObjectHeap final : public BaseHeap { |
| 796 public: | 797 public: |
| 797 LargeObjectHeap(ThreadState*, int); | 798 LargeObjectHeap(ThreadState*, int); |
| 798 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); | 799 Address allocateLargeObjectPage(size_t, size_t gcInfoIndex); |
| 799 void freeLargeObjectPage(LargeObjectPage*); | 800 void freeLargeObjectPage(LargeObjectPage*); |
| 800 #if ENABLE(ASSERT) | 801 #if ENABLE(ASSERT) |
| 801 virtual bool isConsistentForGC() override { return true; } | 802 virtual bool isConsistentForGC() override { return true; } |
| 802 #endif | 803 #endif |
| (...skipping 500 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1303 NO_SANITIZE_ADDRESS inline | 1304 NO_SANITIZE_ADDRESS inline |
| 1304 void HeapObjectHeader::markDead() | 1305 void HeapObjectHeader::markDead() |
| 1305 { | 1306 { |
| 1306 checkHeader(); | 1307 checkHeader(); |
| 1307 ASSERT(!isMarked()); | 1308 ASSERT(!isMarked()); |
| 1308 m_encoded |= headerDeadBitMask; | 1309 m_encoded |= headerDeadBitMask; |
| 1309 } | 1310 } |
| 1310 | 1311 |
| 1311 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1312 inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
| 1312 { | 1313 { |
| 1313 #if ENABLE(GC_PROFILING) | |
|
haraken
2015/06/23 05:38:58
This function is performance-sensitive and we don'
ssid
2015/06/23 06:03:39
This means that these values will never be added t
haraken
2015/06/23 06:24:24
Yeah, it's a bit unfortunate we can't have the val
ssid
2015/06/23 07:39:56
Okay, removing it.
| |
| 1314 m_cumulativeAllocationSize += allocationSize; | 1314 m_cumulativeAllocationSize += allocationSize; |
| 1315 ++m_allocationCount; | 1315 ++m_allocationCount; |
| 1316 #endif | |
| 1317 | 1316 |
| 1318 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1317 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1319 #if ENABLE(GC_PROFILING) | |
| 1320 ++m_inlineAllocationCount; | 1318 ++m_inlineAllocationCount; |
| 1321 #endif | |
| 1322 Address headerAddress = m_currentAllocationPoint; | 1319 Address headerAddress = m_currentAllocationPoint; |
| 1323 m_currentAllocationPoint += allocationSize; | 1320 m_currentAllocationPoint += allocationSize; |
| 1324 m_remainingAllocationSize -= allocationSize; | 1321 m_remainingAllocationSize -= allocationSize; |
| 1325 ASSERT(gcInfoIndex > 0); | 1322 ASSERT(gcInfoIndex > 0); |
| 1326 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1323 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
| 1327 Address result = headerAddress + sizeof(HeapObjectHeader); | 1324 Address result = headerAddress + sizeof(HeapObjectHeader); |
| 1328 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1325 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1329 | 1326 |
| 1330 // Unpoison the memory used for the object (payload). | 1327 // Unpoison the memory used for the object (payload). |
| 1331 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1328 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1389 size_t copySize = previousHeader->payloadSize(); | 1386 size_t copySize = previousHeader->payloadSize(); |
| 1390 if (copySize > size) | 1387 if (copySize > size) |
| 1391 copySize = size; | 1388 copySize = size; |
| 1392 memcpy(address, previous, copySize); | 1389 memcpy(address, previous, copySize); |
| 1393 return address; | 1390 return address; |
| 1394 } | 1391 } |
| 1395 | 1392 |
| 1396 } // namespace blink | 1393 } // namespace blink |
| 1397 | 1394 |
| 1398 #endif // Heap_h | 1395 #endif // Heap_h |
| OLD | NEW |