Chromium Code Reviews
| Index: Source/platform/heap/Heap.h |
| diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h |
| index 2365632741e4b20576cf2db71237d027adc48395..a46a4a8237dbbf6c5745e0a69f410f7563fa09a4 100644 |
| --- a/Source/platform/heap/Heap.h |
| +++ b/Source/platform/heap/Heap.h |
| @@ -386,8 +386,17 @@ public: |
| virtual void removeFromHeap() = 0; |
| virtual void sweep() = 0; |
| virtual void markUnmarkedObjectsDead() = 0; |
| + |
| #if defined(ADDRESS_SANITIZER) |
| - virtual void poisonUnmarkedObjects() = 0; |
| + enum ObjectsToPoison { |
| + UnmarkedOnly, |
| + UnmarkedOrMarked, |
| + }; |
| + enum Poisoning { |
| + SetPoison, |
| + ClearPoison, |
| + }; |
| + virtual void poisonObjects(ObjectsToPoison, Poisoning) = 0; |
| #endif |
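Reviewer note, not part of the patch: a minimal self-contained sketch of how the widened poisoning hook could be driven, with a stand-in Page type; BasePage/NormalPage internals are not reproduced here and the caller name is an assumption.

    // Stand-in for BasePage and its subclasses; illustrative only.
    #include <vector>

    struct Page {
        enum ObjectsToPoison { UnmarkedOnly, UnmarkedOrMarked };
        enum Poisoning { SetPoison, ClearPoison };
        virtual void poisonObjects(ObjectsToPoison, Poisoning) = 0;
        virtual ~Page() { }
    };

    // Before lazy sweeping: poison only dead (unmarked) objects so that any
    // mutator access to them is reported by ASan. A caller that needs to
    // temporarily poison everything could instead pass UnmarkedOrMarked with
    // SetPoison first and ClearPoison afterwards.
    void poisonForLazySweep(const std::vector<Page*>& pages)
    {
        for (size_t i = 0; i < pages.size(); ++i)
            pages[i]->poisonObjects(Page::UnmarkedOnly, Page::SetPoison);
    }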
| // Check if the given address points to an object in this |
| // heap page. If so, find the start of that object and mark it |
| @@ -474,7 +483,7 @@ public: |
| virtual void sweep() override; |
| virtual void markUnmarkedObjectsDead() override; |
| #if defined(ADDRESS_SANITIZER) |
| - virtual void poisonUnmarkedObjects() override; |
| + virtual void poisonObjects(ObjectsToPoison, Poisoning) override; |
| #endif |
| virtual void checkAndMarkPointer(Visitor*, Address) override; |
| virtual void markOrphaned() override; |
| @@ -536,7 +545,7 @@ public: |
| virtual void sweep() override; |
| virtual void markUnmarkedObjectsDead() override; |
| #if defined(ADDRESS_SANITIZER) |
| - virtual void poisonUnmarkedObjects() override; |
| + virtual void poisonObjects(ObjectsToPoison, Poisoning) override; |
| #endif |
| virtual void checkAndMarkPointer(Visitor*, Address) override; |
| virtual void markOrphaned() override; |
| @@ -701,6 +710,7 @@ public: |
| void prepareForSweep(); |
| #if defined(ADDRESS_SANITIZER) |
| void poisonUnmarkedObjects(); |
| + void poisonHeap(bool poisonOrNot); |
| #endif |
| Address lazySweep(size_t, size_t gcInfoIndex); |
| void sweepUnsweptPage(); |
| @@ -904,7 +914,7 @@ public: |
| return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| } |
| static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); |
| - template<typename T> static Address allocate(size_t); |
| + template<typename T> static Address allocate(size_t, bool eagerlySweep = false); |
| template<typename T> static Address reallocate(void* previous, size_t); |
| enum GCReason { |
| @@ -1010,6 +1020,9 @@ private: |
| // Reset counters that track live and allocated-since-last-GC sizes. |
| static void resetHeapCounters(); |
| + static int heapIndexForObjectSize(size_t); |
| + static bool isNormalHeapIndex(int); |
| + |
| static Visitor* s_markingVisitor; |
| static CallbackStack* s_markingStack; |
| static CallbackStack* s_postMarkingCallbackStack; |
| @@ -1031,6 +1044,21 @@ private: |
| friend class ThreadState; |
| }; |
| +template<typename T> |
| +struct IsEagerlyFinalizedType { |
| +private: |
| + typedef char YesType; |
| + struct NoType { |
| + char padding[8]; |
| + }; |
| + |
| + template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*); |
| + template <typename U> static NoType checkMarker(...); |
| + |
| +public: |
| + static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType); |
| +}; |
| + |
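Reviewer note: a hedged illustration of the marker detection performed by IsEagerlyFinalizedType above; the example types are made up, and the nested typedef is what EAGERLY_FINALIZE() is assumed to declare for a class.

    struct PlainObject { };

    struct EagerObject {
        // Assumed effect of EAGERLY_FINALIZE(): a nested marker type that the
        // checkMarker() overload above can name.
        typedef int IsEagerlyFinalizedMarker;
    };

    static_assert(!IsEagerlyFinalizedType<PlainObject>::value, "no marker: lazily swept");
    static_assert(IsEagerlyFinalizedType<EagerObject>::value, "marker present: eagerly swept");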
| template<typename T> class GarbageCollected { |
| WTF_MAKE_NONCOPYABLE(GarbageCollected); |
| @@ -1055,12 +1083,12 @@ public: |
| void* operator new(size_t size) |
| { |
| - return allocateObject(size); |
| + return allocateObject(size, IsEagerlyFinalizedType<T>::value); |
| } |
| - static void* allocateObject(size_t size) |
| + static void* allocateObject(size_t size, bool eagerlySweep) |
| { |
| - return Heap::allocate<T>(size); |
| + return Heap::allocate<T>(size, eagerlySweep); |
| } |
| void operator delete(void* p) |
| @@ -1076,7 +1104,7 @@ protected: |
| // Assigning class types to their heaps. |
| // |
| -// We use sized heaps for most 'normal' objcts to improve memory locality. |
| +// We use sized heaps for most 'normal' objects to improve memory locality. |
| // It seems that the same type of objects are likely to be accessed together, |
| // which means that we want to group objects by type. That's one reason |
| // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), |
| @@ -1089,24 +1117,26 @@ protected: |
| // instances have to be finalized early and cannot be delayed until lazy |
| // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() |
| // macro is used to declare a class (and its derived classes) as being |
| -// in need of eagerly finalized. Must be defined with 'public' visibility |
| +// in need of eager finalization. Must be defined with 'public' visibility |
| // for a class. |
| // |
| -template<typename T, typename Enabled = void> |
| -class HeapIndexTrait { |
| -public: |
| - static int heapIndexForObject(size_t size) |
| - { |
| - if (size < 64) { |
| - if (size < 32) |
| - return NormalPage1HeapIndex; |
| - return NormalPage2HeapIndex; |
| - } |
| - if (size < 128) |
| - return NormalPage3HeapIndex; |
| - return NormalPage4HeapIndex; |
| + |
| +inline int Heap::heapIndexForObjectSize(size_t size) |
| +{ |
| + if (size < 64) { |
| + if (size < 32) |
| + return NormalPage1HeapIndex; |
| + return NormalPage2HeapIndex; |
| } |
| -}; |
| + if (size < 128) |
| + return NormalPage3HeapIndex; |
| + return NormalPage4HeapIndex; |
| +} |
| + |
| +inline bool Heap::isNormalHeapIndex(int index) |
| +{ |
| + return index >= NormalPage1HeapIndex && index <= NormalPage4HeapIndex; |
| +} |
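Reviewer note: the bucket boundaries implied by heapIndexForObjectSize(), restated as assertions for readability (ASSERT is WTF's assertion macro):

    ASSERT(Heap::heapIndexForObjectSize(16) == NormalPage1HeapIndex);   // size < 32
    ASSERT(Heap::heapIndexForObjectSize(32) == NormalPage2HeapIndex);   // 32 <= size < 64
    ASSERT(Heap::heapIndexForObjectSize(64) == NormalPage3HeapIndex);   // 64 <= size < 128
    ASSERT(Heap::heapIndexForObjectSize(4096) == NormalPage4HeapIndex); // size >= 128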
| // TODO(Oilpan): enable this macro when enabling lazy sweeping, non-Oilpan. |
| #if ENABLE(OILPAN) |
| @@ -1119,30 +1149,6 @@ public: |
| #define EAGERLY_FINALIZE_WILL_BE_REMOVED() |
| #endif |
| -template<typename T> |
| -struct IsEagerlyFinalizedType { |
| -private: |
| - typedef char YesType; |
| - struct NoType { |
| - char padding[8]; |
| - }; |
| - |
| - template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*); |
| - template <typename U> static NoType checkMarker(...); |
| - |
| -public: |
| - static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType); |
| -}; |
| - |
| -template<typename T> |
| -class HeapIndexTrait<T, typename WTF::EnableIf<IsEagerlyFinalizedType<T>::value>::Type> { |
| -public: |
| - static int heapIndexForObject(size_t) |
| - { |
| - return EagerSweepHeapIndex; |
| - } |
| -}; |
| - |
| NO_SANITIZE_ADDRESS inline |
| size_t HeapObjectHeader::size() const |
| { |
| @@ -1267,32 +1273,38 @@ inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int he |
| } |
| template<typename T> |
| -Address Heap::allocate(size_t size) |
| +Address Heap::allocate(size_t size, bool eagerlySweep) |
| { |
| ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| - return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
| + return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index()); |
| } |
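Reviewer note: a sketch of how the pieces compose after this change; the class names are illustrative and the trace() bodies are elided.

    class LazyThing : public GarbageCollected<LazyThing> {
    public:
        void trace(Visitor*) { }
    };

    class EagerThing : public GarbageCollected<EagerThing> {
    public:
        EAGERLY_FINALIZE();
        void trace(Visitor*) { }
    };

    // operator new on GarbageCollected<T> forwards IsEagerlyFinalizedType<T>::value:
    //   new LazyThing  -> allocateObject(size, false) -> heapIndexForObjectSize(size)
    //   new EagerThing -> allocateObject(size, true)  -> EagerSweepHeapIndex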
| template<typename T> |
| Address Heap::reallocate(void* previous, size_t size) |
| { |
| + // Not intended to be a full C realloc() substitute; |
| + // realloc(nullptr, size) is not a supported alias for malloc(size). |
| + |
| + // TODO(sof): promptly free the previous object. |
| if (!size) { |
| - // If the new size is 0 this is equivalent to either free(previous) or |
| - // malloc(0). In both cases we do nothing and return nullptr. |
| + // If the new size is 0 this is considered equivalent to free(previous). |
| return nullptr; |
| } |
| + |
| ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| - // TODO(haraken): reallocate() should use the heap that the original object |
| - // is using. This won't be a big deal since reallocate() is rarely used. |
| - Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); |
| - if (!previous) { |
| - // This is equivalent to malloc(size). |
| - return address; |
| - } |
| HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); |
| + BasePage* page = pageFromObject(previousHeader); |
| + ASSERT(page); |
| + int heapIndex = page->heap()->heapIndex(); |
|
haraken, 2015/05/28 12:30:04:
    I'd add ASSERT(heapIndex != EagerSweepHeapIndex) /
sof, 2015/05/28 12:51:39:
    That seems like a random restriction to make; what
|
| + // Recompute the effective heap index if previous allocation |
| + // was on the normal heaps or a large object. |
| + if (isNormalHeapIndex(heapIndex) || heapIndex == LargeObjectHeapIndex) |
| + heapIndex = heapIndexForObjectSize(size); |
| + |
| // TODO(haraken): We don't support reallocate() for finalizable objects. |
| ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
| ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
| + Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index()); |
| size_t copySize = previousHeader->payloadSize(); |
| if (copySize > size) |
| copySize = size; |
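Reviewer note: a hedged usage sketch of the reallocate() semantics above; Buffer is an illustrative, non-finalizable garbage-collected type, not something defined in this CL.

    // Grow an existing allocation: a fresh block is obtained on the heap
    // index recomputed from the new size (unless the original lived on a
    // custom heap), and the smaller of the old payload size and the new
    // size is copied over.
    Address oldObject = Heap::allocate<Buffer>(64);
    Address grown = Heap::reallocate<Buffer>(oldObject, 256);

    // Shrinking to zero is treated as free(previous): nothing is allocated
    // and nullptr is returned. reallocate(nullptr, size) is not supported.
    Address gone = Heap::reallocate<Buffer>(grown, 0);
    ASSERT(!gone);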