Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 798293002: Oilpan: attempt first-fit freelist allocation for backing heaps. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Comments + tidying Created 6 years ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 697 matching lines...)
708 virtual void prepareHeapForTermination() = 0; 708 virtual void prepareHeapForTermination() = 0;
709 }; 709 };
710 710
711 template<typename Header> 711 template<typename Header>
712 class FreeList { 712 class FreeList {
713 public: 713 public:
714 FreeList(); 714 FreeList();
715 715
716 void addToFreeList(Address, size_t); 716 void addToFreeList(Address, size_t);
717 void clear(); 717 void clear();
718 FreeListEntry* takeEntry(size_t allocationSize);
719 718
720 // Returns a bucket number for inserting a FreeListEntry of a given size. 719 // Returns a bucket number for inserting a FreeListEntry of a given size.
721 // All FreeListEntries in the given bucket, n, have size >= 2^n. 720 // All FreeListEntries in the given bucket, n, have size >= 2^n.
722 static int bucketIndexForSize(size_t); 721 static int bucketIndexForSize(size_t);
723 722
724 private: 723 private:
725 int m_biggestFreeListIndex; 724 int m_biggestFreeListIndex;
726 725
727 // All FreeListEntries in the nth list have size >= 2^n. 726 // All FreeListEntries in the nth list have size >= 2^n.
728 FreeListEntry* m_freeLists[blinkPageSizeLog2]; 727 FreeListEntry* m_freeLists[blinkPageSizeLog2];
729 728
730 #if ENABLE(ASSERT)
731 friend class ThreadHeap<Header>; 729 friend class ThreadHeap<Header>;
732 #endif
733 }; 730 };
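The comment on bucketIndexForSize above documents the invariant that every FreeListEntry in bucket n has size >= 2^n. A minimal standalone sketch of one way such an index can be computed (illustrative only; the real implementation lives in Heap.cpp and may differ):

#include <cstddef>

// Sketch: return the position of the highest set bit, i.e. floor(log2(size)),
// so that 2^index <= size and the bucket invariant above holds. Assumes size > 0.
static int bucketIndexForSizeSketch(size_t size)
{
    int index = -1;
    while (size) {
        size >>= 1;
        ++index;
    }
    return index;
}

// Example: size 48 gives index 5, since 2^5 = 32 <= 48 < 64; bucket 5 therefore
// only holds entries of at least 32 bytes.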
734 731
735 // Thread heaps represent a part of the per-thread Blink heap. 732 // Thread heaps represent a part of the per-thread Blink heap.
736 // 733 //
737 // Each Blink thread has a number of thread heaps: one general heap 734 // Each Blink thread has a number of thread heaps: one general heap
738 // that contains any type of object and a number of heaps specialized 735 // that contains any type of object and a number of heaps specialized
739 // for specific object types (such as Node). 736 // for specific object types (such as Node).
740 // 737 //
741 // Each thread heap contains the functionality to allocate new objects 738 // Each thread heap contains the functionality to allocate new objects
742 // (potentially adding new pages to the heap), to find and mark 739 // (potentially adding new pages to the heap), to find and mark
(...skipping 28 matching lines...)
771 768
772 ThreadState* threadState() { return m_threadState; } 769 ThreadState* threadState() { return m_threadState; }
773 770
774 void addToFreeList(Address address, size_t size) 771 void addToFreeList(Address address, size_t size)
775 { 772 {
776 ASSERT(pageFromAddress(address)); 773 ASSERT(pageFromAddress(address));
777 ASSERT(pageFromAddress(address + size - 1)); 774 ASSERT(pageFromAddress(address + size - 1));
778 m_freeList.addToFreeList(address, size); 775 m_freeList.addToFreeList(address, size);
779 } 776 }
780 777
781 inline Address allocate(size_t, const GCInfo*); 778 inline Address allocate(size_t payloadSize, const GCInfo*);
782 inline static size_t roundedAllocationSize(size_t size) 779 inline static size_t roundedAllocationSize(size_t size)
783 { 780 {
784 return allocationSizeFromSize(size) - sizeof(Header); 781 return allocationSizeFromSize(size) - sizeof(Header);
785 } 782 }
786 783
787 virtual void prepareHeapForTermination() override; 784 virtual void prepareHeapForTermination() override;
788 785
789 void removePageFromHeap(HeapPage<Header>*); 786 void removePageFromHeap(HeapPage<Header>*);
790 787
791 PLATFORM_EXPORT void promptlyFreeObject(Header*); 788 PLATFORM_EXPORT void promptlyFreeObject(Header*);
792 PLATFORM_EXPORT bool expandObject(Header*, size_t); 789 PLATFORM_EXPORT bool expandObject(Header*, size_t);
793 void shrinkObject(Header*, size_t); 790 void shrinkObject(Header*, size_t);
794 791
795 private: 792 private:
796 void addPageToHeap(const GCInfo*); 793 void addPageToHeap(const GCInfo*);
797 PLATFORM_EXPORT Address outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo*); 794 PLATFORM_EXPORT Address outOfLineAllocate(size_t allocationSize, const GCInfo*);
798 static size_t allocationSizeFromSize(size_t); 795 static size_t allocationSizeFromSize(size_t);
799 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); 796 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
800 Address currentAllocationPoint() const { return m_currentAllocationPoint; } 797 Address currentAllocationPoint() const { return m_currentAllocationPoint; }
801 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } 798 size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
802 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } 799 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
803 void setAllocationPoint(Address point, size_t size) 800 void setAllocationPoint(Address point, size_t size)
804 { 801 {
805 ASSERT(!point || pageFromAddress(point)); 802 ASSERT(!point || pageFromAddress(point));
806 ASSERT(size <= HeapPage<Header>::payloadSize()); 803 ASSERT(size <= HeapPage<Header>::payloadSize());
807 if (hasCurrentAllocationArea()) 804 if (hasCurrentAllocationArea())
808 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); 805 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
809 updateRemainingAllocationSize(); 806 updateRemainingAllocationSize();
810 m_currentAllocationPoint = point; 807 m_currentAllocationPoint = point;
811 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; 808 m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
812 } 809 }
813 void updateRemainingAllocationSize(); 810 void updateRemainingAllocationSize();
814 bool allocateFromFreeList(size_t); 811 Address allocateFromFreeList(size_t, const GCInfo*);
815 812
816 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); 813 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**);
817 void allocatePage(const GCInfo*); 814 void allocatePage(const GCInfo*);
818 815
816 inline Address allocateSize(size_t allocationSize, const GCInfo*);
817 inline Address allocateAtAddress(Address, size_t allocationSize, const GCInfo*);
818
819 #if ENABLE(ASSERT) 819 #if ENABLE(ASSERT)
820 bool pagesToBeSweptContains(Address); 820 bool pagesToBeSweptContains(Address);
821 bool pagesAllocatedDuringSweepingContains(Address); 821 bool pagesAllocatedDuringSweepingContains(Address);
822 #endif 822 #endif
823 823
824 void sweepNormalPages(); 824 void sweepNormalPages();
825 void sweepLargePages(); 825 void sweepLargePages();
826 bool coalesce(size_t); 826 bool coalesce(size_t);
827 827
828 Address m_currentAllocationPoint; 828 Address m_currentAllocationPoint;
(...skipping 438 matching lines...)
1267 // therefore has to happen before any calculation on the size. 1267 // therefore has to happen before any calculation on the size.
1268 RELEASE_ASSERT(size < maxHeapObjectSize); 1268 RELEASE_ASSERT(size < maxHeapObjectSize);
1269 1269
1270 // Add space for header. 1270 // Add space for header.
1271 size_t allocationSize = size + sizeof(Header); 1271 size_t allocationSize = size + sizeof(Header);
1272 // Align size with allocation granularity. 1272 // Align size with allocation granularity.
1273 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 1273 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
1274 return allocationSize; 1274 return allocationSize;
1275 } 1275 }
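allocationSizeFromSize adds room for the object header and rounds the result up to the allocation granularity. A small standalone sketch of the same arithmetic, using assumed constants for the header size and granularity (the real values are defined elsewhere in Heap.h):

#include <cstddef>
#include <cstdio>

// Assumed values for illustration only; Blink defines its own granularity and Header type.
const size_t kAssumedHeaderSize = 8;
const size_t kAssumedGranularity = 8;
const size_t kAssumedMask = kAssumedGranularity - 1;

size_t allocationSizeFromSizeSketch(size_t size)
{
    size_t allocationSize = size + kAssumedHeaderSize;      // add space for the header
    return (allocationSize + kAssumedMask) & ~kAssumedMask; // round up to the granularity
}

int main()
{
    // A 20-byte payload becomes 28 bytes with the header, which rounds up to 32.
    printf("%zu\n", allocationSizeFromSizeSketch(20));
    return 0;
}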
1276 1276
1277
haraken 2014/12/16 02:26:33 Unnecessary empty line.
1278 template<typename Header>
1279 inline Address ThreadHeap<Header>::allocateAtAddress(Address headerAddress, size_t allocationSize, const GCInfo* gcInfo)
1280 {
1281 new (NotNull, headerAddress) Header(allocationSize, gcInfo);
1282 Address result = headerAddress + sizeof(Header);
1283 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1284
1285 // Unpoison the memory used for the object (payload).
1286 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header));
1287 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1288 memset(result, 0, allocationSize - sizeof(Header));
1289 #endif
1290 ASSERT(pageFromAddress(headerAddress + allocationSize - 1));
1291 return result;
1292 }
1293
1294 template<typename Header>
1295 Address ThreadHeap<Header>::allocateSize(size_t allocationSize, const GCInfo* gcInfo)
1296 {
1297 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
1298 Address headerAddress = m_currentAllocationPoint;
1299 m_currentAllocationPoint += allocationSize;
1300 m_remainingAllocationSize -= allocationSize;
1301 return allocateAtAddress(headerAddress, allocationSize, gcInfo);
1302 }
1303 return outOfLineAllocate(allocationSize, gcInfo);
1304 }
1305
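allocateSize is the fast path: while the current allocation area has room, allocation is a bump of m_currentAllocationPoint; anything else falls through to outOfLineAllocate. The CL title says the slow path attempts first-fit free list allocation for backing heaps; a rough sketch of what first-fit over the power-of-two buckets could look like (names and structure are assumptions made for illustration, not the actual Heap.cpp code):

#include <cstddef>

// Illustrative first-fit scan: walk the buckets that can possibly hold
// allocationSize and take the first entry that fits. FreeListEntrySketch and
// the calling convention are assumptions for this sketch only.
struct FreeListEntrySketch {
    size_t size;
    FreeListEntrySketch* next;
};

char* firstFitAllocateSketch(FreeListEntrySketch* buckets[], int bucketCount,
                             int startBucket, size_t allocationSize)
{
    for (int i = startBucket; i < bucketCount; ++i) {
        FreeListEntrySketch** link = &buckets[i];
        for (FreeListEntrySketch* entry = *link; entry; link = &entry->next, entry = *link) {
            if (entry->size >= allocationSize) {
                *link = entry->next;                   // unlink the first entry that fits
                return reinterpret_cast<char*>(entry); // caller would re-add any unused tail
            }
        }
    }
    return nullptr; // no fit: the real code would coalesce or allocate a new page
}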
1277 template<typename Header> 1306 template<typename Header>
1278 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) 1307 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
1279 { 1308 {
1280 size_t allocationSize = allocationSizeFromSize(size); 1309 return allocateSize(allocationSizeFromSize(size), gcInfo);
1281 if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
1282 Address headerAddress = m_currentAllocationPoint;
1283 m_currentAllocationPoint += allocationSize;
1284 m_remainingAllocationSize -= allocationSize;
1285 new (NotNull, headerAddress) Header(allocationSize, gcInfo);
1286 Address result = headerAddress + sizeof(Header);
1287 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1288
1289 // Unpoison the memory used for the object (payload).
1290 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header));
1291 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
1292 memset(result, 0, allocationSize - sizeof(Header));
1293 #endif
1294 ASSERT(pageFromAddress(headerAddress + allocationSize - 1));
1295 return result;
1296 }
1297 return outOfLineAllocate(size, allocationSize, gcInfo);
1298 } 1310 }
1299 1311
1300 template<typename T, typename HeapTraits> 1312 template<typename T, typename HeapTraits>
1301 Address Heap::allocate(size_t size) 1313 Address Heap::allocate(size_t size)
1302 { 1314 {
1303 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1315 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1304 ASSERT(state->isAllocationAllowed()); 1316 ASSERT(state->isAllocationAllowed());
1305 const GCInfo* gcInfo = GCInfoTrait<T>::get(); 1317 const GCInfo* gcInfo = GCInfoTrait<T>::get();
1306 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size); 1318 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size);
1307 BaseHeap* heap = state->heap(heapIndex); 1319 BaseHeap* heap = state->heap(heapIndex);
(...skipping 1022 matching lines...)
2330 template<typename T, size_t inlineCapacity> 2342 template<typename T, size_t inlineCapacity>
2331 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; 2343 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { };
2332 template<typename T, size_t inlineCapacity> 2344 template<typename T, size_t inlineCapacity>
2333 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; 2345 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { };
2334 template<typename T, typename U, typename V> 2346 template<typename T, typename U, typename V>
2335 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; 2347 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { };
2336 2348
2337 } // namespace blink 2349 } // namespace blink
2338 2350
2339 #endif // Heap_h 2351 #endif // Heap_h