| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 246 matching lines...) |
| 257 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 257 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } |
| 258 | 258 |
| 259 Header* heapObjectHeader() | 259 Header* heapObjectHeader() |
| 260 { | 260 { |
| 261 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 261 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| 262 return reinterpret_cast<Header*>(headerAddress); | 262 return reinterpret_cast<Header*>(headerAddress); |
| 263 } | 263 } |
| 264 | 264 |
| 265 bool isMarked(); | 265 bool isMarked(); |
| 266 void unmark(); | 266 void unmark(); |
| 267 void getStats(HeapStats&); | 267 void getStatsForTesting(HeapStats&); |
| 268 void mark(Visitor*); | 268 void mark(Visitor*); |
| 269 void finalize(); | 269 void finalize(); |
| 270 void setDeadMark(); | 270 void setDeadMark(); |
| 271 virtual void markOrphaned() | 271 virtual void markOrphaned() |
| 272 { | 272 { |
| 273 // Zap the payload with a recognizable value to detect any incorrect | 273 // Zap the payload with a recognizable value to detect any incorrect |
| 274 // cross thread pointer usage. | 274 // cross thread pointer usage. |
| 275 memset(payload(), orphanedZapValue, payloadSize()); | 275 memset(payload(), orphanedZapValue, payloadSize()); |
| 276 BaseHeapPage::markOrphaned(); | 276 BaseHeapPage::markOrphaned(); |
| 277 } | 277 } |
| (...skipping 250 matching lines...) |
| 528 return address() + sizeof(*this) + headerPadding<Header>(); | 528 return address() + sizeof(*this) + headerPadding<Header>(); |
| 529 } | 529 } |
| 530 | 530 |
| 531 static size_t payloadSize() | 531 static size_t payloadSize() |
| 532 { | 532 { |
| 533 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 533 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; |
| 534 } | 534 } |
| 535 | 535 |
| 536 Address end() { return payload() + payloadSize(); } | 536 Address end() { return payload() + payloadSize(); } |
| 537 | 537 |
| 538 void getStats(HeapStats&); | 538 void getStatsForTesting(HeapStats&); |
| 539 void clearLiveAndMarkDead(); | 539 void clearLiveAndMarkDead(); |
| 540 void sweep(HeapStats*, ThreadHeap<Header>*); | 540 void sweep(HeapStats*, ThreadHeap<Header>*); |
| 541 void clearObjectStartBitMap(); | 541 void clearObjectStartBitMap(); |
| 542 void finalize(Header*); | 542 void finalize(Header*); |
| 543 virtual void checkAndMarkPointer(Visitor*, Address) override; | 543 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 544 #if ENABLE(GC_PROFILE_MARKING) | 544 #if ENABLE(GC_PROFILE_MARKING) |
| 545 const GCInfo* findGCInfo(Address) override; | 545 const GCInfo* findGCInfo(Address) override; |
| 546 #endif | 546 #endif |
| 547 #if ENABLE(GC_PROFILE_HEAP) | 547 #if ENABLE(GC_PROFILE_HEAP) |
| 548 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 548 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| (...skipping 155 matching lines...) |
| 704 | 704 |
| 705 // Sweep this part of the Blink heap. This finalizes dead objects | 705 // Sweep this part of the Blink heap. This finalizes dead objects |
| 706 // and builds freelists for all the unused memory. | 706 // and builds freelists for all the unused memory. |
| 707 virtual void sweep(HeapStats*) = 0; | 707 virtual void sweep(HeapStats*) = 0; |
| 708 virtual void postSweepProcessing() = 0; | 708 virtual void postSweepProcessing() = 0; |
| 709 | 709 |
| 710 virtual void clearFreeLists() = 0; | 710 virtual void clearFreeLists() = 0; |
| 711 virtual void clearLiveAndMarkDead() = 0; | 711 virtual void clearLiveAndMarkDead() = 0; |
| 712 | 712 |
| 713 virtual void makeConsistentForSweeping() = 0; | 713 virtual void makeConsistentForSweeping() = 0; |
| 714 | |
| 715 #if ENABLE(ASSERT) | 714 #if ENABLE(ASSERT) |
| 716 virtual bool isConsistentForSweeping() = 0; | 715 virtual bool isConsistentForSweeping() = 0; |
| 716 #endif |
| 717 virtual void getStatsForTesting(HeapStats&) = 0; |
| 717 | 718 |
| 718 virtual void getScannedStats(HeapStats&) = 0; | 719 virtual void updateRemainingAllocationSize() = 0; |
| 719 #endif | |
| 720 | 720 |
| 721 virtual void prepareHeapForTermination() = 0; | 721 virtual void prepareHeapForTermination() = 0; |
| 722 | 722 |
| 723 virtual int normalPageCount() = 0; | 723 virtual int normalPageCount() = 0; |
| 724 | 724 |
| 725 virtual BaseHeap* split(int normalPages) = 0; | 725 virtual BaseHeap* split(int normalPages) = 0; |
| 726 virtual void merge(BaseHeap* other) = 0; | 726 virtual void merge(BaseHeap* other) = 0; |
| 727 | 727 |
| 728 // Returns a bucket number for inserting a FreeListEntry of a | 728 // Returns a bucket number for inserting a FreeListEntry of a |
| 729 // given size. All FreeListEntries in the given bucket, n, have | 729 // given size. All FreeListEntries in the given bucket, n, have |
| (...skipping 26 matching lines...) |
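The bucket comment just above (lines 728-729) is cut off by the elided span, and the helper's body is not part of this diff. Its wording points at power-of-two size classes; a minimal sketch under that assumption, with bucket n covering free-list entry sizes in [2^n, 2^(n+1)):

    // Sketch only -- the real helper is defined outside this hunk. Assumes
    // power-of-two buckets: bucket n holds FreeListEntries whose size
    // satisfies 2^n <= size < 2^(n+1).
    static int bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            index++;
        }
        return index; // e.g. size 48 -> bucket 5, size 64 -> bucket 6
    }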
| 756 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 756 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 757 #endif | 757 #endif |
| 758 | 758 |
| 759 virtual void sweep(HeapStats*); | 759 virtual void sweep(HeapStats*); |
| 760 virtual void postSweepProcessing(); | 760 virtual void postSweepProcessing(); |
| 761 | 761 |
| 762 virtual void clearFreeLists(); | 762 virtual void clearFreeLists(); |
| 763 virtual void clearLiveAndMarkDead(); | 763 virtual void clearLiveAndMarkDead(); |
| 764 | 764 |
| 765 virtual void makeConsistentForSweeping(); | 765 virtual void makeConsistentForSweeping(); |
| 766 | |
| 767 #if ENABLE(ASSERT) | 766 #if ENABLE(ASSERT) |
| 768 virtual bool isConsistentForSweeping(); | 767 virtual bool isConsistentForSweeping(); |
| 768 #endif |
| 769 virtual void getStatsForTesting(HeapStats&); |
| 769 | 770 |
| 770 virtual void getScannedStats(HeapStats&); | 771 virtual void updateRemainingAllocationSize(); |
| 771 #endif | |
| 772 | 772 |
| 773 ThreadState* threadState() { return m_threadState; } | 773 ThreadState* threadState() { return m_threadState; } |
| 774 HeapStats& stats() { return m_threadState->stats(); } | 774 HeapStats& stats() { return m_threadState->stats(); } |
| 775 | 775 |
| 776 inline Address allocate(size_t, const GCInfo*); | 776 inline Address allocate(size_t, const GCInfo*); |
| 777 void addToFreeList(Address, size_t); | 777 void addToFreeList(Address, size_t); |
| 778 inline static size_t roundedAllocationSize(size_t size) | 778 inline static size_t roundedAllocationSize(size_t size) |
| 779 { | 779 { |
| 780 return allocationSizeFromSize(size) - sizeof(Header); | 780 return allocationSizeFromSize(size) - sizeof(Header); |
| 781 } | 781 } |
| (...skipping 14 matching lines...) |
| 796 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 796 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); |
| 797 static size_t allocationSizeFromSize(size_t); | 797 static size_t allocationSizeFromSize(size_t); |
| 798 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 798 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| 799 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 799 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 800 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 800 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 801 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 801 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 802 void setAllocationPoint(Address point, size_t size) | 802 void setAllocationPoint(Address point, size_t size) |
| 803 { | 803 { |
| 804 ASSERT(!point || heapPageFromAddress(point)); | 804 ASSERT(!point || heapPageFromAddress(point)); |
| 805 ASSERT(size <= HeapPage<Header>::payloadSize()); | 805 ASSERT(size <= HeapPage<Header>::payloadSize()); |
| 806 updateRemainingAllocationSize(); |
| 806 m_currentAllocationPoint = point; | 807 m_currentAllocationPoint = point; |
| 807 m_remainingAllocationSize = size; | 808 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
| 808 } | 809 } |
| 809 void ensureCurrentAllocation(size_t, const GCInfo*); | 810 void ensureCurrentAllocation(size_t, const GCInfo*); |
| 810 bool allocateFromFreeList(size_t); | 811 bool allocateFromFreeList(size_t); |
| 811 | 812 |
| 812 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 813 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); |
| 813 void allocatePage(const GCInfo*); | 814 void allocatePage(const GCInfo*); |
| 814 | 815 |
| 815 #if ENABLE(ASSERT) | 816 #if ENABLE(ASSERT) |
| 816 bool pagesToBeSweptContains(Address); | 817 bool pagesToBeSweptContains(Address); |
| 817 bool pagesAllocatedDuringSweepingContains(Address); | 818 bool pagesAllocatedDuringSweepingContains(Address); |
| 818 #endif | 819 #endif |
| 819 | 820 |
| 820 void sweepNormalPages(HeapStats*); | 821 void sweepNormalPages(HeapStats*); |
| 821 void sweepLargePages(HeapStats*); | 822 void sweepLargePages(HeapStats*); |
| 822 bool coalesce(size_t); | 823 bool coalesce(size_t); |
| 823 | 824 |
| 824 Address m_currentAllocationPoint; | 825 Address m_currentAllocationPoint; |
| 825 size_t m_remainingAllocationSize; | 826 size_t m_remainingAllocationSize; |
| 827 size_t m_lastRemainingAllocationSize; |
| 826 | 828 |
| 827 HeapPage<Header>* m_firstPage; | 829 HeapPage<Header>* m_firstPage; |
| 828 LargeHeapObject<Header>* m_firstLargeHeapObject; | 830 LargeHeapObject<Header>* m_firstLargeHeapObject; |
| 829 | 831 |
| 830 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 832 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; |
| 831 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 833 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; |
| 832 | 834 |
| 833 // Merge point for parallel sweep. | 835 // Merge point for parallel sweep. |
| 834 HeapPage<Header>* m_mergePoint; | 836 HeapPage<Header>* m_mergePoint; |
| 835 | 837 |
| (...skipping 104 matching lines...) |
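The new m_lastRemainingAllocationSize member (new line 827), together with the updateRemainingAllocationSize() hook added to BaseHeap and ThreadHeap (new lines 719 and 771) and its call in setAllocationPoint (new line 806), suggests that object-space accounting is now deferred: rather than bumping the stats on every inline allocation, the heap reconciles how far the bump pointer advanced since the last update. The body is not visible in this hunk; a sketch under that reading:

    // Sketch only -- the implementation lives outside this diff. Fold the
    // bytes bump-allocated since the last reconciliation into the thread's
    // stats, then remember the current remaining size for the next round.
    template<typename Header>
    void ThreadHeap<Header>::updateRemainingAllocationSize()
    {
        if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
            stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
            m_lastRemainingAllocationSize = remainingAllocationSize();
        }
        ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
    }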
| 940 static const GCInfo* findGCInfo(Address); | 942 static const GCInfo* findGCInfo(Address); |
| 941 | 943 |
| 942 static String createBacktraceString(); | 944 static String createBacktraceString(); |
| 943 #endif | 945 #endif |
| 944 | 946 |
| 945 // Collect heap stats for all threads attached to the Blink | 947 // Collect heap stats for all threads attached to the Blink |
| 946 // garbage collector. Should only be called during garbage | 948 // garbage collector. Should only be called during garbage |
| 947 // collection where threads are known to be at safe points. | 949 // collection where threads are known to be at safe points. |
| 948 static void getStats(HeapStats*); | 950 static void getStats(HeapStats*); |
| 949 | 951 |
| 952 static void getStatsForTesting(HeapStats*); |
| 953 |
| 950 static void getHeapSpaceSize(uint64_t*, uint64_t*); | 954 static void getHeapSpaceSize(uint64_t*, uint64_t*); |
| 951 | 955 |
| 952 static void makeConsistentForSweeping(); | 956 static void makeConsistentForSweeping(); |
| 953 | 957 |
| 954 #if ENABLE(ASSERT) | 958 #if ENABLE(ASSERT) |
| 955 static bool isConsistentForSweeping(); | 959 static bool isConsistentForSweeping(); |
| 956 #endif | 960 #endif |
| 957 | 961 |
| 958 static void flushHeapDoesNotContainCache(); | 962 static void flushHeapDoesNotContainCache(); |
| 959 | 963 |
| (...skipping 396 matching lines...) |
| 1356 size_t allocationSize = size + sizeof(Header); | 1360 size_t allocationSize = size + sizeof(Header); |
| 1357 // Align size with allocation granularity. | 1361 // Align size with allocation granularity. |
| 1358 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1362 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1359 return allocationSize; | 1363 return allocationSize; |
| 1360 } | 1364 } |
| 1361 | 1365 |
| 1362 template<typename Header> | 1366 template<typename Header> |
| 1363 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 1367 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
| 1364 { | 1368 { |
| 1365 size_t allocationSize = allocationSizeFromSize(size); | 1369 size_t allocationSize = allocationSizeFromSize(size); |
| 1366 bool isLargeObject = allocationSize > blinkPageSize / 2; | 1370 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1367 if (isLargeObject) | 1371 Address headerAddress = m_currentAllocationPoint; |
| 1368 return allocateLargeObject(allocationSize, gcInfo); | 1372 m_currentAllocationPoint += allocationSize; |
| 1369 if (m_remainingAllocationSize < allocationSize) | 1373 m_remainingAllocationSize -= allocationSize; |
| 1370 return outOfLineAllocate(size, gcInfo); | 1374 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
| 1371 Address headerAddress = m_currentAllocationPoint; | 1375 Address result = headerAddress + sizeof(*header); |
| 1372 m_currentAllocationPoint += allocationSize; | 1376 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1373 m_remainingAllocationSize -= allocationSize; | 1377 |
| 1374 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); | 1378 // Unpoison the memory used for the object (payload). |
| 1375 size_t payloadSize = allocationSize - sizeof(Header); | 1379 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
| 1376 stats().increaseObjectSpace(payloadSize); | |
| 1377 Address result = headerAddress + sizeof(*header); | |
| 1378 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
| 1379 // Unpoison the memory used for the object (payload). | |
| 1380 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); | |
| 1381 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1380 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 1382 memset(result, 0, payloadSize); | 1381 memset(result, 0, allocationSize - sizeof(Header)); |
| 1383 #endif | 1382 #endif |
| 1384 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); | 1383 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); |
| 1385 return result; | 1384 return result; |
| 1385 } |
| 1386 return outOfLineAllocate(size, gcInfo); |
| 1386 } | 1387 } |
| 1387 | 1388 |
| 1388 template<typename T, typename HeapTraits> | 1389 template<typename T, typename HeapTraits> |
| 1389 Address Heap::allocate(size_t size) | 1390 Address Heap::allocate(size_t size) |
| 1390 { | 1391 { |
| 1391 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1392 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1392 ASSERT(state->isAllocationAllowed()); | 1393 ASSERT(state->isAllocationAllowed()); |
| 1393 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 1394 const GCInfo* gcInfo = GCInfoTrait<T>::get(); |
| 1394 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 1395 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); |
| 1395 BaseHeap* heap = state->heap(heapIndex); | 1396 BaseHeap* heap = state->heap(heapIndex); |
| (...skipping 1001 matching lines...) |
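In the rewritten ThreadHeap<Header>::allocate() above, the old inline large-object check (old lines 1366-1368) and the per-allocation stats update (old line 1376) are gone; anything that is not a plain bump-pointer hit now falls through to outOfLineAllocate (new line 1386). Its body is in Heap.cpp and not part of this diff; a plausible shape, using only members declared in this header, would be:

    // Sketch only -- the real slow path is outside this diff. It presumably
    // (1) routes oversized requests to the large-object allocator,
    // (2) reconciles the deferred stats, and (3) refills the bump area
    // before retrying the inline fast path.
    template<typename Header>
    Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
    {
        size_t allocationSize = allocationSizeFromSize(size);
        if (allocationSize > blinkPageSize / 2)
            return allocateLargeObject(allocationSize, gcInfo);
        updateRemainingAllocationSize();
        ensureCurrentAllocation(allocationSize, gcInfo);
        return allocate(size, gcInfo);
    }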
| 2397 }; | 2398 }; |
| 2398 | 2399 |
| 2399 template<typename T> | 2400 template<typename T> |
| 2400 struct IfWeakMember<WeakMember<T> > { | 2401 struct IfWeakMember<WeakMember<T> > { |
| 2401 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2402 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
| 2402 }; | 2403 }; |
| 2403 | 2404 |
| 2404 } | 2405 } |
| 2405 | 2406 |
| 2406 #endif // Heap_h | 2407 #endif // Heap_h |