Chromium Code Reviews

Index: Source/platform/heap/Heap.h
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
index aaa91892dcebc1b96df3b475168c9c0a5cf7998f..9a9c0ba5e1869e9570b0dccd116c2937edf9deb4 100644
--- a/Source/platform/heap/Heap.h
+++ b/Source/platform/heap/Heap.h
@@ -252,7 +252,7 @@ public:
     bool isMarked();
     void unmark();
-    void getStats(HeapStats&);
+    void getStatsForTesting(HeapStats&);
     void mark(Visitor*);
     void finalize();
     void setDeadMark();
@@ -523,7 +523,7 @@ public:
     Address end() { return payload() + payloadSize(); }
-    void getStats(HeapStats&);
+    void getStatsForTesting(HeapStats&);
     void clearLiveAndMarkDead();
     void sweep(HeapStats*, ThreadHeap<Header>*);
     void clearObjectStartBitMap();
@@ -699,12 +699,12 @@ public:
     virtual void clearLiveAndMarkDead() = 0;
     virtual void makeConsistentForSweeping() = 0;
-
 #if ENABLE(ASSERT)
     virtual bool isConsistentForSweeping() = 0;
-
-    virtual void getScannedStats(HeapStats&) = 0;
 #endif
+    virtual void getStatsForTesting(HeapStats&) = 0;
+
+    virtual void updateRemainingAllocationSize() = 0;
     virtual void prepareHeapForTermination() = 0;
@@ -751,12 +751,12 @@ public:
     virtual void clearLiveAndMarkDead();
     virtual void makeConsistentForSweeping();
-
 #if ENABLE(ASSERT)
     virtual bool isConsistentForSweeping();
-
-    virtual void getScannedStats(HeapStats&);
 #endif
+    virtual void getStatsForTesting(HeapStats&);
+
+    virtual void updateRemainingAllocationSize();
     ThreadState* threadState() { return m_threadState; }
     HeapStats& stats() { return m_threadState->stats(); }
@@ -785,14 +785,20 @@ private:
     static size_t allocationSizeFromSize(size_t);
     PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
     Address currentAllocationPoint() const { return m_currentAllocationPoint; }
-    size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
+    size_t remainingAllocationSize() const
+    {
+        RELEASE_ASSERT(m_allocationLimit >= m_currentAllocationPoint);
+        return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint);
+    }
     bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
     void setAllocationPoint(Address point, size_t size)
     {
         ASSERT(!point || heapPageFromAddress(point));
         ASSERT(size <= HeapPage<Header>::payloadSize());
+        updateRemainingAllocationSize();
         m_currentAllocationPoint = point;
-        m_remainingAllocationSize = size;
+        m_allocationLimit = point + size;
+        m_lastRemainingAllocationSize = remainingAllocationSize();
     }
     void ensureCurrentAllocation(size_t, const GCInfo*);
     bool allocateFromFreeList(size_t);
@@ -810,7 +816,8 @@ private:
     bool coalesce(size_t);
     Address m_currentAllocationPoint;
-    size_t m_remainingAllocationSize;
+    Address m_allocationLimit;
+    size_t m_lastRemainingAllocationSize;
     HeapPage<Header>* m_firstPage;
     LargeHeapObject<Header>* m_firstLargeHeapObject;
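
Note on the bookkeeping above: the remaining bump-allocation space is no longer stored in a member; it is derived as m_allocationLimit - m_currentAllocationPoint, and m_lastRemainingAllocationSize snapshots that value whenever the allocation point is reset. Together with the removal of the per-allocation stats().increaseObjectSpace(payloadSize) call from the fast path (see the allocate() hunk below), this suggests updateRemainingAllocationSize() flushes the bytes consumed since the last snapshot into the heap statistics. Its body lives in Heap.cpp and is not part of this header diff, so the following is only a minimal sketch of that idea; the exact accounting is an assumption, not the patch:

    // Hypothetical sketch (not from this diff): account for the bytes handed
    // out from the bump-allocation area since the last snapshot, then refresh
    // the snapshot so the same delta is not counted twice.
    template<typename Header>
    void ThreadHeap<Header>::updateRemainingAllocationSize()
    {
        // The allocation area only shrinks between snapshots, so the delta is
        // the number of bytes bump-allocated since the last call.
        ASSERT(m_lastRemainingAllocationSize >= remainingAllocationSize());
        size_t consumed = m_lastRemainingAllocationSize - remainingAllocationSize();
        if (consumed) {
            stats().increaseObjectSpace(consumed);
            m_lastRemainingAllocationSize = remainingAllocationSize();
        }
    }
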
@@ -935,6 +942,8 @@ public:
     // collection where threads are known to be at safe points.
     static void getStats(HeapStats*);
+    static void getStatsForTesting(HeapStats*);
+
     static void getHeapSpaceSize(uint64_t*, uint64_t*);
     static void makeConsistentForSweeping();
@@ -1351,26 +1360,23 @@ template<typename Header>
 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
 {
     size_t allocationSize = allocationSizeFromSize(size);
-    bool isLargeObject = allocationSize > blinkPageSize / 2;
-    if (isLargeObject)
-        return allocateLargeObject(allocationSize, gcInfo);
-    if (m_remainingAllocationSize < allocationSize)
-        return outOfLineAllocate(size, gcInfo);
-    Address headerAddress = m_currentAllocationPoint;
-    m_currentAllocationPoint += allocationSize;
-    m_remainingAllocationSize -= allocationSize;
-    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
-    size_t payloadSize = allocationSize - sizeof(Header);
-    stats().increaseObjectSpace(payloadSize);
-    Address result = headerAddress + sizeof(*header);
-    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
-    // Unpoison the memory used for the object (payload).
-    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
+    Address nextAllocationPoint = m_currentAllocationPoint + allocationSize;
+    if (LIKELY(nextAllocationPoint <= m_allocationLimit)) {
Erik Corry (2014/10/23 07:18:36):
I'm not sure this change is OK. allocationSize ca

haraken (2014/10/23 08:37:53):
Done.
+        Address headerAddress = m_currentAllocationPoint;
+        m_currentAllocationPoint = nextAllocationPoint;
+        Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
+        Address result = headerAddress + sizeof(*header);
+        ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
+
+        // Unpoison the memory used for the object (payload).
+        ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header));
 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
-    memset(result, 0, payloadSize);
+        memset(result, 0, allocationSize - sizeof(Header));
 #endif
-    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
-    return result;
+        ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
+        return result;
+    }
+    return outOfLineAllocate(size, gcInfo);
 }
 template<typename T, typename HeapTraits>
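
For context on the inline comment above: the old fast path first filtered out large objects (allocationSize > blinkPageSize / 2) and then compared allocationSize against the remaining size, whereas the new fast path forms the pointer m_currentAllocationPoint + allocationSize before checking it against m_allocationLimit. Erik Corry's comment is truncated in this view; one plausible reading (an assumption, not the recorded review text) is that with the large-object filter gone, a sufficiently large allocationSize could push that pointer arithmetic past the allocation area, or wrap it, before the comparison runs. A size-based comparison sidesteps this by never forming an out-of-range address. A minimal sketch of such a variant, using only names already present in this diff (sanitizer and memset bookkeeping omitted for brevity):

    // Illustrative overflow-safe variant of the fast path (a sketch, not the
    // code in this patch): compare sizes first, so no address beyond
    // m_allocationLimit is ever computed.
    template<typename Header>
    Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
    {
        size_t allocationSize = allocationSizeFromSize(size);
        // remainingAllocationSize() is m_allocationLimit - m_currentAllocationPoint.
        if (LIKELY(allocationSize <= remainingAllocationSize())) {
            Address headerAddress = m_currentAllocationPoint;
            m_currentAllocationPoint += allocationSize;
            Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
            Address result = headerAddress + sizeof(*header);
            ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
            return result;
        }
        // Slow path covers large objects, free-list allocation, and GC triggering.
        return outOfLineAllocate(size, gcInfo);
    }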