Chromium Code Reviews

Issue 634243004: Oilpan: Simplify Heap::allocate (Closed)
Base URL: svn://svn.chromium.org/blink/trunk

| OLD | NEW |
|---|---|
| 1 /* | 1 /* | 
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 
| 3 * | 3 * | 
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without | 
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are | 
| 6 * met: | 6 * met: | 
| 7 * | 7 * | 
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright | 
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. | 
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above | 
| (...skipping 234 matching lines...) | |
| 245 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 245 size_t payloadSize() { return heapObjectHeader()->payloadSize(); } | 
| 246 | 246 | 
| 247 Header* heapObjectHeader() | 247 Header* heapObjectHeader() | 
| 248 { | 248 { | 
| 249 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 249 Address headerAddress = address() + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); | 
| 250 return reinterpret_cast<Header*>(headerAddress); | 250 return reinterpret_cast<Header*>(headerAddress); | 
| 251 } | 251 } | 
| 252 | 252 | 
| 253 bool isMarked(); | 253 bool isMarked(); | 
| 254 void unmark(); | 254 void unmark(); | 
| 255 void getStats(HeapStats&); | 255 void getStatsForTesting(HeapStats&); | 
| 256 void mark(Visitor*); | 256 void mark(Visitor*); | 
| 257 void finalize(); | 257 void finalize(); | 
| 258 void setDeadMark(); | 258 void setDeadMark(); | 
| 259 virtual void markOrphaned() | 259 virtual void markOrphaned() | 
| 260 { | 260 { | 
| 261 // Zap the payload with a recognizable value to detect any incorrect | 261 // Zap the payload with a recognizable value to detect any incorrect | 
| 262 // cross thread pointer usage. | 262 // cross thread pointer usage. | 
| 263 memset(payload(), orphanedZapValue, payloadSize()); | 263 memset(payload(), orphanedZapValue, payloadSize()); | 
| 264 BaseHeapPage::markOrphaned(); | 264 BaseHeapPage::markOrphaned(); | 
| 265 } | 265 } | 
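
Note on the zapping above: overwriting an orphaned payload with a recognizable byte is a standard debugging device; a stale cross-thread pointer that is later dereferenced then shows up as an address full of the zap pattern in a crash dump. A minimal sketch of the idea (the zap constant here is hypothetical; Blink defines its own orphanedZapValue elsewhere in the Oilpan headers):

```cpp
#include <cstddef>
#include <cstring>

// Hypothetical zap constant, for illustration only.
const unsigned char kOrphanedZapValue = 0xDC;

// Fill a dead payload with the zap pattern so that any later use of
// a stale pointer into this region is easy to recognize in a debugger.
void zapPayload(void* payload, size_t payloadSize)
{
    memset(payload, kOrphanedZapValue, payloadSize);
}
```
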
| (...skipping 250 matching lines...) | |
| 516 return address() + sizeof(*this) + headerPadding<Header>(); | 516 return address() + sizeof(*this) + headerPadding<Header>(); | 
| 517 } | 517 } | 
| 518 | 518 | 
| 519 static size_t payloadSize() | 519 static size_t payloadSize() | 
| 520 { | 520 { | 
| 521 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 521 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding<Header>()) & ~allocationMask; | 
| 522 } | 522 } | 
| 523 | 523 | 
| 524 Address end() { return payload() + payloadSize(); } | 524 Address end() { return payload() + payloadSize(); } | 
| 525 | 525 | 
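
The payload arithmetic above carves object space out of a Blink page: everything after the page header (plus any header padding) is usable, rounded down to the allocation granularity by `& ~allocationMask`. A small worked sketch with assumed constants (the real values come from blinkPagePayloadSize(), sizeof(HeapPage) and headerPadding<Header>()):

```cpp
#include <cassert>
#include <cstddef>

// Assumed values, for illustration only.
const size_t kPagePayload = 4096;  // stand-in for blinkPagePayloadSize()
const size_t kPageHeader = 40;     // stand-in for sizeof(HeapPage) + padding
const size_t kAllocationMask = 7;  // 8-byte allocation granularity

int main()
{
    // payloadSize(): space left after the page header, rounded down
    // to a multiple of the allocation granularity.
    size_t payloadSize = (kPagePayload - kPageHeader) & ~kAllocationMask;
    assert(payloadSize == 4056); // 4056 is already 8-byte aligned
    return 0;
}
```
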
| 526 void getStats(HeapStats&); | 526 void getStatsForTesting(HeapStats&); | 
| 527 void clearLiveAndMarkDead(); | 527 void clearLiveAndMarkDead(); | 
| 528 void sweep(HeapStats*, ThreadHeap<Header>*); | 528 void sweep(HeapStats*, ThreadHeap<Header>*); | 
| 529 void clearObjectStartBitMap(); | 529 void clearObjectStartBitMap(); | 
| 530 void finalize(Header*); | 530 void finalize(Header*); | 
| 531 virtual void checkAndMarkPointer(Visitor*, Address) override; | 531 virtual void checkAndMarkPointer(Visitor*, Address) override; | 
| 532 #if ENABLE(GC_PROFILE_MARKING) | 532 #if ENABLE(GC_PROFILE_MARKING) | 
| 533 const GCInfo* findGCInfo(Address) override; | 533 const GCInfo* findGCInfo(Address) override; | 
| 534 #endif | 534 #endif | 
| 535 #if ENABLE(GC_PROFILE_HEAP) | 535 #if ENABLE(GC_PROFILE_HEAP) | 
| 536 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 536 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 
| (...skipping 155 matching lines...) | |
| 692 | 692 | 
| 693 // Sweep this part of the Blink heap. This finalizes dead objects | 693 // Sweep this part of the Blink heap. This finalizes dead objects | 
| 694 // and builds freelists for all the unused memory. | 694 // and builds freelists for all the unused memory. | 
| 695 virtual void sweep(HeapStats*) = 0; | 695 virtual void sweep(HeapStats*) = 0; | 
| 696 virtual void postSweepProcessing() = 0; | 696 virtual void postSweepProcessing() = 0; | 
| 697 | 697 | 
| 698 virtual void clearFreeLists() = 0; | 698 virtual void clearFreeLists() = 0; | 
| 699 virtual void clearLiveAndMarkDead() = 0; | 699 virtual void clearLiveAndMarkDead() = 0; | 
| 700 | 700 | 
| 701 virtual void makeConsistentForSweeping() = 0; | 701 virtual void makeConsistentForSweeping() = 0; | 
| 702 | |
| 703 #if ENABLE(ASSERT) | 702 #if ENABLE(ASSERT) | 
| 704 virtual bool isConsistentForSweeping() = 0; | 703 virtual bool isConsistentForSweeping() = 0; | 
| 704 #endif | |
| 705 virtual void getStatsForTesting(HeapStats&) = 0; | |
| 705 | 706 | 
| 706 virtual void getScannedStats(HeapStats&) = 0; | 707 virtual void updateRemainingAllocationSize() = 0; | 
| 707 #endif | |
| 708 | 708 | 
| 709 virtual void prepareHeapForTermination() = 0; | 709 virtual void prepareHeapForTermination() = 0; | 
| 710 | 710 | 
| 711 virtual int normalPageCount() = 0; | 711 virtual int normalPageCount() = 0; | 
| 712 | 712 | 
| 713 virtual BaseHeap* split(int normalPages) = 0; | 713 virtual BaseHeap* split(int normalPages) = 0; | 
| 714 virtual void merge(BaseHeap* other) = 0; | 714 virtual void merge(BaseHeap* other) = 0; | 
| 715 | 715 | 
| 716 // Returns a bucket number for inserting a FreeListEntry of a | 716 // Returns a bucket number for inserting a FreeListEntry of a | 
| 717 // given size. All FreeListEntries in the given bucket, n, have | 717 // given size. All FreeListEntries in the given bucket, n, have | 
| (...skipping 26 matching lines...) | |
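
The bucket comment above is cut off by the collapsed region, but it describes the usual power-of-two size-class scheme for free lists: bucket n holds entries whose size is at least 2^n, so the bucket index is the position of the highest set bit. A hedged sketch of that computation (assumed behavior; the skipped lines define Blink's exact contract):

```cpp
#include <cassert>
#include <cstddef>

// Index of the highest set bit, i.e. floor(log2(size)).
// Entries in bucket n are assumed to have size >= 2^n.
int bucketIndexForSize(size_t size)
{
    assert(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        ++index;
    }
    return index;
}

// Example: a 96-byte free entry lands in bucket 6 (2^6 = 64 <= 96 < 128).
```
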
| 744 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 744 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 
| 745 #endif | 745 #endif | 
| 746 | 746 | 
| 747 virtual void sweep(HeapStats*); | 747 virtual void sweep(HeapStats*); | 
| 748 virtual void postSweepProcessing(); | 748 virtual void postSweepProcessing(); | 
| 749 | 749 | 
| 750 virtual void clearFreeLists(); | 750 virtual void clearFreeLists(); | 
| 751 virtual void clearLiveAndMarkDead(); | 751 virtual void clearLiveAndMarkDead(); | 
| 752 | 752 | 
| 753 virtual void makeConsistentForSweeping(); | 753 virtual void makeConsistentForSweeping(); | 
| 754 | |
| 755 #if ENABLE(ASSERT) | 754 #if ENABLE(ASSERT) | 
| 756 virtual bool isConsistentForSweeping(); | 755 virtual bool isConsistentForSweeping(); | 
| 756 #endif | |
| 757 virtual void getStatsForTesting(HeapStats&); | |
| 757 | 758 | 
| 758 virtual void getScannedStats(HeapStats&); | 759 virtual void updateRemainingAllocationSize(); | 
| 759 #endif | |
| 760 | 760 | 
| 761 ThreadState* threadState() { return m_threadState; } | 761 ThreadState* threadState() { return m_threadState; } | 
| 762 HeapStats& stats() { return m_threadState->stats(); } | 762 HeapStats& stats() { return m_threadState->stats(); } | 
| 763 | 763 | 
| 764 inline Address allocate(size_t, const GCInfo*); | 764 inline Address allocate(size_t, const GCInfo*); | 
| 765 void addToFreeList(Address, size_t); | 765 void addToFreeList(Address, size_t); | 
| 766 inline static size_t roundedAllocationSize(size_t size) | 766 inline static size_t roundedAllocationSize(size_t size) | 
| 767 { | 767 { | 
| 768 return allocationSizeFromSize(size) - sizeof(Header); | 768 return allocationSizeFromSize(size) - sizeof(Header); | 
| 769 } | 769 } | 
| 770 | 770 | 
| 771 virtual void prepareHeapForTermination(); | 771 virtual void prepareHeapForTermination(); | 
| 772 | 772 | 
| 773 virtual int normalPageCount() { return m_numberOfNormalPages; } | 773 virtual int normalPageCount() { return m_numberOfNormalPages; } | 
| 774 | 774 | 
| 775 virtual BaseHeap* split(int numberOfNormalPages); | 775 virtual BaseHeap* split(int numberOfNormalPages); | 
| 776 virtual void merge(BaseHeap* splitOffBase); | 776 virtual void merge(BaseHeap* splitOffBase); | 
| 777 | 777 | 
| 778 void removePageFromHeap(HeapPage<Header>*); | 778 void removePageFromHeap(HeapPage<Header>*); | 
| 779 | 779 | 
| 780 PLATFORM_EXPORT void promptlyFreeObject(Header*); | 780 PLATFORM_EXPORT void promptlyFreeObject(Header*); | 
| 781 | 781 | 
| 782 private: | 782 private: | 
| 783 void addPageToHeap(const GCInfo*); | 783 void addPageToHeap(const GCInfo*); | 
| 784 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 784 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 
| 785 static size_t allocationSizeFromSize(size_t); | 785 static size_t allocationSizeFromSize(size_t); | 
| 786 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 786 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 
| 787 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 787 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 
| 788 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 788 size_t remainingAllocationSize() const | 
| 789 { | |
| 790 RELEASE_ASSERT(m_allocationLimit >= m_currentAllocationPoint); | |
| 791 return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint); | |
| 792 } | |
| 789 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 793 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 
| 790 void setAllocationPoint(Address point, size_t size) | 794 void setAllocationPoint(Address point, size_t size) | 
| 791 { | 795 { | 
| 792 ASSERT(!point || heapPageFromAddress(point)); | 796 ASSERT(!point || heapPageFromAddress(point)); | 
| 793 ASSERT(size <= HeapPage<Header>::payloadSize()); | 797 ASSERT(size <= HeapPage<Header>::payloadSize()); | 
| 798 updateRemainingAllocationSize(); | |
| 794 m_currentAllocationPoint = point; | 799 m_currentAllocationPoint = point; | 
| 795 m_remainingAllocationSize = size; | 800 m_allocationLimit = point + size; | 
| 801 m_lastRemainingAllocationSize = remainingAllocationSize(); | |
| 796 } | 802 } | 
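
Note: with stats().increaseObjectSpace() gone from the allocation fast path (see the allocate() diff further down), the new m_lastRemainingAllocationSize field suggests that allocated bytes are now accounted lazily: updateRemainingAllocationSize() can charge the delta since the last snapshot in one step instead of on every allocation. A hedged sketch of that deferred-accounting pattern, with hypothetical names (not Blink's actual ThreadHeap):

```cpp
#include <cstddef>
#include <cstdint>

class BumpRegionStats {
public:
    void setRegion(uint8_t* point, size_t size)
    {
        m_current = point;
        m_limit = point + size;
        m_lastRemaining = size;
    }

    // Fast-path bump; note that no stats update happens here.
    uint8_t* allocate(size_t size)
    {
        if (m_current + size > m_limit)
            return nullptr; // slow path elided
        uint8_t* result = m_current;
        m_current += size;
        return result;
    }

    // Charge everything consumed since the last snapshot to the stats.
    void updateRemainingAllocationSize()
    {
        size_t remaining = static_cast<size_t>(m_limit - m_current);
        m_allocatedObjectSpace += m_lastRemaining - remaining;
        m_lastRemaining = remaining;
    }

    size_t allocatedObjectSpace() const { return m_allocatedObjectSpace; }

private:
    uint8_t* m_current = nullptr;
    uint8_t* m_limit = nullptr;
    size_t m_lastRemaining = 0;
    size_t m_allocatedObjectSpace = 0;
};
```
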
| 797 void ensureCurrentAllocation(size_t, const GCInfo*); | 803 void ensureCurrentAllocation(size_t, const GCInfo*); | 
| 798 bool allocateFromFreeList(size_t); | 804 bool allocateFromFreeList(size_t); | 
| 799 | 805 | 
| 800 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 806 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 
| 801 void allocatePage(const GCInfo*); | 807 void allocatePage(const GCInfo*); | 
| 802 | 808 | 
| 803 #if ENABLE(ASSERT) | 809 #if ENABLE(ASSERT) | 
| 804 bool pagesToBeSweptContains(Address); | 810 bool pagesToBeSweptContains(Address); | 
| 805 bool pagesAllocatedDuringSweepingContains(Address); | 811 bool pagesAllocatedDuringSweepingContains(Address); | 
| 806 #endif | 812 #endif | 
| 807 | 813 | 
| 808 void sweepNormalPages(HeapStats*); | 814 void sweepNormalPages(HeapStats*); | 
| 809 void sweepLargePages(HeapStats*); | 815 void sweepLargePages(HeapStats*); | 
| 810 bool coalesce(size_t); | 816 bool coalesce(size_t); | 
| 811 | 817 | 
| 812 Address m_currentAllocationPoint; | 818 Address m_currentAllocationPoint; | 
| 813 size_t m_remainingAllocationSize; | 819 Address m_allocationLimit; | 
| 820 size_t m_lastRemainingAllocationSize; | |
| 814 | 821 | 
| 815 HeapPage<Header>* m_firstPage; | 822 HeapPage<Header>* m_firstPage; | 
| 816 LargeHeapObject<Header>* m_firstLargeHeapObject; | 823 LargeHeapObject<Header>* m_firstLargeHeapObject; | 
| 817 | 824 | 
| 818 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 825 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 
| 819 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 826 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 
| 820 | 827 | 
| 821 // Merge point for parallel sweep. | 828 // Merge point for parallel sweep. | 
| 822 HeapPage<Header>* m_mergePoint; | 829 HeapPage<Header>* m_mergePoint; | 
| 823 | 830 | 
| (...skipping 104 matching lines...) | |
| 928 static const GCInfo* findGCInfo(Address); | 935 static const GCInfo* findGCInfo(Address); | 
| 929 | 936 | 
| 930 static String createBacktraceString(); | 937 static String createBacktraceString(); | 
| 931 #endif | 938 #endif | 
| 932 | 939 | 
| 933 // Collect heap stats for all threads attached to the Blink | 940 // Collect heap stats for all threads attached to the Blink | 
| 934 // garbage collector. Should only be called during garbage | 941 // garbage collector. Should only be called during garbage | 
| 935 // collection where threads are known to be at safe points. | 942 // collection where threads are known to be at safe points. | 
| 936 static void getStats(HeapStats*); | 943 static void getStats(HeapStats*); | 
| 937 | 944 | 
| 945 static void getStatsForTesting(HeapStats*); | |
| 946 | |
| 938 static void getHeapSpaceSize(uint64_t*, uint64_t*); | 947 static void getHeapSpaceSize(uint64_t*, uint64_t*); | 
| 939 | 948 | 
| 940 static void makeConsistentForSweeping(); | 949 static void makeConsistentForSweeping(); | 
| 941 | 950 | 
| 942 #if ENABLE(ASSERT) | 951 #if ENABLE(ASSERT) | 
| 943 static bool isConsistentForSweeping(); | 952 static bool isConsistentForSweeping(); | 
| 944 #endif | 953 #endif | 
| 945 | 954 | 
| 946 static void flushHeapDoesNotContainCache(); | 955 static void flushHeapDoesNotContainCache(); | 
| 947 | 956 | 
| (...skipping 396 matching lines...) | |
| 1344 size_t allocationSize = size + sizeof(Header); | 1353 size_t allocationSize = size + sizeof(Header); | 
| 1345 // Align size with allocation granularity. | 1354 // Align size with allocation granularity. | 
| 1346 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1355 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 
| 1347 return allocationSize; | 1356 return allocationSize; | 
| 1348 } | 1357 } | 
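
A worked example of the alignment above (header size and granularity assumed for illustration; the real values depend on sizeof(Header) and Blink's allocation granularity):

```cpp
#include <cassert>
#include <cstddef>

// Assumed values, for illustration only.
const size_t kHeaderSize = 8;
const size_t kAllocationMask = 7; // 8-byte granularity

size_t allocationSizeFromSize(size_t size)
{
    size_t allocationSize = size + kHeaderSize;
    // Align with the allocation granularity, rounding up.
    allocationSize = (allocationSize + kAllocationMask) & ~kAllocationMask;
    return allocationSize;
}

int main()
{
    assert(allocationSizeFromSize(1) == 16); // 9 rounded up to 16
    assert(allocationSizeFromSize(8) == 16); // 16 already aligned
    assert(allocationSizeFromSize(9) == 24); // 17 rounded up to 24
    return 0;
}
```
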
| 1349 | 1358 | 
| 1350 template<typename Header> | 1359 template<typename Header> | 
| 1351 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 1360 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 
| 1352 { | 1361 { | 
| 1353 size_t allocationSize = allocationSizeFromSize(size); | 1362 size_t allocationSize = allocationSizeFromSize(size); | 
| 1354 bool isLargeObject = allocationSize > blinkPageSize / 2; | 1363 Address nextAllocationPoint = m_currentAllocationPoint + allocationSize; | 
| 1355 if (isLargeObject) | 1364 if (LIKELY(nextAllocationPoint <= m_allocationLimit)) { | 

Erik Corry (2014/10/23 07:18:36):
I'm not sure this change is OK.  allocationSize ca

haraken (2014/10/23 08:37:53):
Done.

| 1356 return allocateLargeObject(allocationSize, gcInfo); | 1365 Address headerAddress = m_currentAllocationPoint; | 
| 1357 if (m_remainingAllocationSize < allocationSize) | 1366 m_currentAllocationPoint = nextAllocationPoint; | 
| 1358 return outOfLineAllocate(size, gcInfo); | 1367 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); | 
| 1359 Address headerAddress = m_currentAllocationPoint; | 1368 Address result = headerAddress + sizeof(*header); | 
| 1360 m_currentAllocationPoint += allocationSize; | 1369 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 
| 1361 m_remainingAllocationSize -= allocationSize; | 1370 | 
| 1362 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); | 1371 // Unpoison the memory used for the object (payload). | 
| 1363 size_t payloadSize = allocationSize - sizeof(Header); | 1372 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); | 
| 1364 stats().increaseObjectSpace(payloadSize); | |
| 1365 Address result = headerAddress + sizeof(*header); | |
| 1366 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
| 1367 // Unpoison the memory used for the object (payload). | |
| 1368 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); | |
| 1369 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1373 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 
| 1370 memset(result, 0, payloadSize); | 1374 memset(result, 0, allocationSize - sizeof(Header)); | 
| 1371 #endif | 1375 #endif | 
| 1372 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); | 1376 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); | 
| 1373 return result; | 1377 return result; | 
| 1378 } | |
| 1379 return outOfLineAllocate(size, gcInfo); | |
| 1374 } | 1380 } | 
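
Summary of the restructuring: the NEW fast path is a single bump-pointer comparison against m_allocationLimit, with large objects, free-list refill and stats accounting pushed into outOfLineAllocate(). A self-contained sketch of the same shape (simplified types; not the actual Blink code):

```cpp
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Minimal bump allocator mirroring the shape of the new fast path:
// one comparison against a limit pointer, then advance and return.
class BumpAllocator {
public:
    BumpAllocator(Address start, size_t size)
        : m_currentAllocationPoint(start)
        , m_allocationLimit(start + size) { }

    Address allocate(size_t allocationSize)
    {
        Address next = m_currentAllocationPoint + allocationSize;
        // NOTE: as the review thread hints, a huge allocationSize could
        // overflow this pointer addition; production code must route
        // over-large requests to the slow path before this check.
        if (next <= m_allocationLimit) {
            Address result = m_currentAllocationPoint;
            m_currentAllocationPoint = next;
            return result;
        }
        return outOfLineAllocate(allocationSize);
    }

private:
    // Slow path (new page, free list, large object) elided in this sketch.
    Address outOfLineAllocate(size_t) { return nullptr; }

    Address m_currentAllocationPoint;
    Address m_allocationLimit;
};
```
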
| 1375 | 1381 | 
| 1376 template<typename T, typename HeapTraits> | 1382 template<typename T, typename HeapTraits> | 
| 1377 Address Heap::allocate(size_t size) | 1383 Address Heap::allocate(size_t size) | 
| 1378 { | 1384 { | 
| 1379 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1385 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 
| 1380 ASSERT(state->isAllocationAllowed()); | 1386 ASSERT(state->isAllocationAllowed()); | 
| 1381 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 1387 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 
| 1382 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 1388 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 
| 1383 BaseHeap* heap = state->heap(heapIndex); | 1389 BaseHeap* heap = state->heap(heapIndex); | 
| (...skipping 1001 matching lines...) | |
| 2385 }; | 2391 }; | 
| 2386 | 2392 | 
| 2387 template<typename T> | 2393 template<typename T> | 
| 2388 struct IfWeakMember<WeakMember<T> > { | 2394 struct IfWeakMember<WeakMember<T> > { | 
| 2389 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2395 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 
| 2390 }; | 2396 }; | 
| 2391 | 2397 | 
| 2392 } | 2398 } | 
| 2393 | 2399 | 
| 2394 #endif // Heap_h | 2400 #endif // Heap_h | 