Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.h

Issue 906213002: Oilpan: Rename heap classes (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 10 months ago
Index: Source/platform/heap/Heap.h
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
index ce5c79d6c93a38cda4832a9cf9acd6a3379c63c4..386f1b75883e03bad207d35b3b574d4b069a5005 100644
--- a/Source/platform/heap/Heap.h
+++ b/Source/platform/heap/Heap.h
@@ -89,7 +89,7 @@ static const intptr_t zappedVTable = 0xd0d;
class CallbackStack;
class PageMemory;
-class ThreadHeapForHeapPage;
+class NormalPageHeap;
template<ThreadAffinity affinity> class ThreadLocalPersistents;
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;
@@ -105,7 +105,7 @@ class TracedValue;
// page size is 2^17 byte and each object is guaranteed to be aligned with
// 2^3 byte.
// - For large objects, |size| is 0. The actual size of a large object is
-// stored in LargeObject::m_payloadSize.
+// stored in LargeObjectPage::m_payloadSize.
// - 15 bit is enough for gcInfoIndex because there are less than 2^15 types
// in Blink.
const size_t headerGCInfoIndexShift = 17;
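A minimal sketch of the bit layout this encoding implies, assuming a 32-bit m_encoded and the headerSizeMask constant referenced further down in this file (illustration only, not part of the patch):

    // bits 31..17: gcInfoIndex (15 bits, per the comment above)
    // bits 16..3:  size        (14 bits; sizes are 8-byte aligned, so the
    //                           low three bits of size are always zero)
    // bits  2..0:  flag bits   (e.g. headerFreedBitMask, headerDeadBitMask)
    size_t decodedSize(uint32_t encoded) { return encoded & headerSizeMask; }
    size_t decodedGCInfoIndex(uint32_t encoded) { return encoded >> headerGCInfoIndexShift; }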
@@ -125,9 +125,9 @@ const size_t headerDeadBitMask = 4;
const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask;
const size_t largeObjectSizeInHeader = 0;
const size_t gcInfoIndexForFreeListHeader = 0;
-const size_t nonLargeObjectSizeMax = 1 << 17;
+const size_t nonLargeObjectPageSizeMax = 1 << 17;
-static_assert(nonLargeObjectSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
+static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
class PLATFORM_EXPORT HeapObjectHeader {
public:
@@ -151,7 +151,7 @@ public:
#endif
ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
- ASSERT(size < nonLargeObjectSizeMax);
+ ASSERT(size < nonLargeObjectPageSizeMax);
ASSERT(!(size & allocationMask));
m_encoded = (gcInfoIndex << headerGCInfoIndexShift) | size | (gcInfoIndex ? 0 : headerFreedBitMask);
}
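One subtlety in this constructor: gcInfoIndex 0 is reserved for free-list headers (gcInfoIndexForFreeListHeader above), so the `gcInfoIndex ? 0 : headerFreedBitMask` term sets the freed bit exactly for free-list entries. A hypothetical helper built on that invariant:

    // Sketch only: an entry encoded with gcInfoIndex == 0 is a free-list
    // header and carries headerFreedBitMask; real objects do not.
    bool isFreeListEntry(uint32_t encoded) { return encoded & headerFreedBitMask; }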
@@ -350,25 +350,25 @@ inline bool isPageHeaderAddress(Address address)
// FIXME: Add a good comment about the heap layout once heap relayout work
// is done.
-class BaseHeapPage {
+class BasePage {
public:
- BaseHeapPage(PageMemory*, ThreadHeap*);
- virtual ~BaseHeapPage() { }
+ BasePage(PageMemory*, BaseHeap*);
+ virtual ~BasePage() { }
- void link(BaseHeapPage** previousNext)
+ void link(BasePage** previousNext)
{
m_next = *previousNext;
*previousNext = this;
}
- void unlink(BaseHeapPage** previousNext)
+ void unlink(BasePage** previousNext)
{
*previousNext = m_next;
m_next = nullptr;
}
- BaseHeapPage* next() const { return m_next; }
+ BasePage* next() const { return m_next; }
// virtual methods are slow. So performance-sensitive methods
- // should be defined as non-virtual methods on HeapPage and LargeObject.
+ // should be defined as non-virtual methods on NormalPage and LargeObjectPage.
// The following methods are not performance-sensitive.
virtual size_t objectPayloadSizeForTesting() = 0;
virtual bool isEmpty() = 0;
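link() and unlink() above use the pointer-to-next-pointer idiom: the caller passes the address of whichever pointer currently references the position (the list head or a predecessor's m_next), so one code path serves both head and interior updates. A usage sketch (hypothetical caller, not from the patch):

    // Prepend a page to an intrusive singly-linked list.
    void prependPage(BasePage*& firstPage, BasePage* page)
    {
        page->link(&firstPage); // page->m_next = firstPage; firstPage = page;
    }
    // Pop the head again.
    void removeHead(BasePage*& firstPage)
    {
        firstPage->unlink(&firstPage); // firstPage advances to the old m_next
    }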
@@ -393,11 +393,11 @@ public:
virtual bool contains(Address) = 0;
#endif
virtual size_t size() = 0;
- virtual bool isLargeObject() { return false; }
+ virtual bool isLargeObjectPage() { return false; }
Address address() { return reinterpret_cast<Address>(this); }
PageMemory* storage() const { return m_storage; }
- ThreadHeap* heap() const { return m_heap; }
+ BaseHeap* heap() const { return m_heap; }
bool orphaned() { return !m_heap; }
bool terminating() { return m_terminating; }
void setTerminating() { m_terminating = true; }
@@ -419,8 +419,8 @@ public:
private:
PageMemory* m_storage;
- ThreadHeap* m_heap;
- BaseHeapPage* m_next;
+ BaseHeap* m_heap;
+ BasePage* m_next;
// Whether the page is part of a terminating thread or not.
bool m_terminating;
@@ -430,20 +430,20 @@ private:
// Set to false at the start of a sweep, true upon completion
// of lazy sweeping.
bool m_swept;
- friend class ThreadHeap;
+ friend class BaseHeap;
};
-class HeapPage final : public BaseHeapPage {
+class NormalPage final : public BasePage {
public:
- HeapPage(PageMemory*, ThreadHeap*);
+ NormalPage(PageMemory*, BaseHeap*);
Address payload()
{
- return address() + sizeof(HeapPage) + headerPadding();
+ return address() + sizeof(NormalPage) + headerPadding();
}
size_t payloadSize()
{
- return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask;
+ return (blinkPagePayloadSize() - sizeof(NormalPage) - headerPadding()) & ~allocationMask;
}
Address payloadEnd() { return payload() + payloadSize(); }
bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); }
@@ -464,7 +464,7 @@ public:
ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize());
#endif
memset(payload(), orphanedZapValue, payloadSize());
- BaseHeapPage::markOrphaned();
+ BasePage::markOrphaned();
}
#if ENABLE(GC_PROFILING)
const GCInfo* findGCInfo(Address) override;
@@ -489,11 +489,11 @@ public:
// the size of the header plus the padding a multiple of 8 bytes.
static size_t headerPadding()
{
- return (sizeof(HeapPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
+ return (sizeof(NormalPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
}
- ThreadHeapForHeapPage* heapForHeapPage();
+ NormalPageHeap* heapForNormalPage();
void clearObjectStartBitMap();
#if defined(ADDRESS_SANITIZER)
@@ -514,10 +514,10 @@ private:
// In order to use the same memory allocation routines for everything allocated
// in the heap, large objects are considered heap pages containing only one
// object.
-class LargeObject final : public BaseHeapPage {
+class LargeObjectPage final : public BasePage {
public:
- LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize)
- : BaseHeapPage(storage, heap)
+ LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize)
+ : BasePage(storage, heap)
, m_payloadSize(payloadSize)
{
}
@@ -538,7 +538,7 @@ public:
// Zap the payload with a recognizable value to detect any incorrect
// cross thread pointer usage.
memset(payload(), orphanedZapValue, payloadSize());
- BaseHeapPage::markOrphaned();
+ BasePage::markOrphaned();
}
#if ENABLE(GC_PROFILING)
@@ -560,19 +560,19 @@ public:
#endif
virtual size_t size()
{
- return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize;
+ return sizeof(LargeObjectPage) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize;
}
// Compute the amount of padding we have to add to a header to make
// the size of the header plus the padding a multiple of 8 bytes.
static size_t headerPadding()
{
- return (sizeof(LargeObject) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
+ return (sizeof(LargeObjectPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
}
- virtual bool isLargeObject() override { return true; }
+ virtual bool isLargeObjectPage() override { return true; }
HeapObjectHeader* heapObjectHeader()
{
- Address headerAddress = address() + sizeof(LargeObject) + headerPadding();
+ Address headerAddress = address() + sizeof(LargeObjectPage) + headerPadding();
return reinterpret_cast<HeapObjectHeader*>(headerAddress);
}
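size() and heapObjectHeader() above pin down the on-page layout of a large object; the diagram and payload() derivation below are inferred from those two functions (payload() itself is not shown in this hunk):

    // Inferred layout of a LargeObjectPage allocation:
    //
    //   address()                                        heapObjectHeader()
    //   v                                                v
    //   [LargeObjectPage][headerPadding()][HeapObjectHeader][m_payloadSize bytes]
    //
    // which suggests the payload accessor is simply:
    Address payload() { return address() + sizeof(LargeObjectPage) + headerPadding() + sizeof(HeapObjectHeader); }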
@@ -667,10 +667,10 @@ private:
Mutex m_mutex[NumberOfHeaps];
};
-class OrphanedPagePool : public PagePool<BaseHeapPage> {
+class OrphanedPagePool : public PagePool<BasePage> {
public:
~OrphanedPagePool();
- void addOrphanedPage(int, BaseHeapPage*);
+ void addOrphanedPage(int, BasePage*);
void decommitOrphanedPages();
#if ENABLE(ASSERT)
bool contains(void*);
@@ -707,7 +707,7 @@ private:
// All FreeListEntries in the nth list have size >= 2^n.
FreeListEntry* m_freeLists[blinkPageSizeLog2];
- friend class ThreadHeapForHeapPage;
+ friend class NormalPageHeap;
};
// Thread heaps represent a part of the per-thread Blink heap.
@@ -720,14 +720,14 @@ private:
// (potentially adding new pages to the heap), to find and mark
// objects during conservative stack scanning and to sweep the set of
// pages after a GC.
-class PLATFORM_EXPORT ThreadHeap {
+class PLATFORM_EXPORT BaseHeap {
public:
- ThreadHeap(ThreadState*, int);
- virtual ~ThreadHeap();
+ BaseHeap(ThreadState*, int);
+ virtual ~BaseHeap();
void cleanupPages();
#if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
- BaseHeapPage* findPageFromAddress(Address);
+ BasePage* findPageFromAddress(Address);
#endif
#if ENABLE(GC_PROFILING)
void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
@@ -754,8 +754,8 @@ public:
}
protected:
- BaseHeapPage* m_firstPage;
- BaseHeapPage* m_firstUnsweptPage;
+ BasePage* m_firstPage;
+ BasePage* m_firstUnsweptPage;
private:
virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0;
@@ -767,9 +767,9 @@ private:
int m_index;
};
-class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap {
+class PLATFORM_EXPORT NormalPageHeap final : public BaseHeap {
public:
- ThreadHeapForHeapPage(ThreadState*, int);
+ NormalPageHeap(ThreadState*, int);
void addToFreeList(Address address, size_t size)
{
ASSERT(findPageFromAddress(address));
@@ -785,7 +785,7 @@ public:
inline Address allocate(size_t payloadSize, size_t gcInfoIndex);
inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
- void freePage(HeapPage*);
+ void freePage(NormalPage*);
bool coalesce();
void promptlyFreeObject(HeapObjectHeader*);
@@ -826,16 +826,16 @@ private:
#endif
};
-class ThreadHeapForLargeObject final : public ThreadHeap {
+class LargeObjectHeap final : public BaseHeap {
public:
- ThreadHeapForLargeObject(ThreadState*, int);
- Address allocateLargeObject(size_t, size_t gcInfoIndex);
- void freeLargeObject(LargeObject*);
+ LargeObjectHeap(ThreadState*, int);
+ Address allocateLargeObjectPage(size_t, size_t gcInfoIndex);
+ void freeLargeObjectPage(LargeObjectPage*);
#if ENABLE(ASSERT)
virtual bool isConsistentForSweeping() override { return true; }
#endif
private:
- Address doAllocateLargeObject(size_t, size_t gcInfoIndex);
+ Address doAllocateLargeObjectPage(size_t, size_t gcInfoIndex);
virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
};
@@ -843,10 +843,10 @@ private:
// pages are aligned at blinkPageBase plus an OS page size.
// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
// typed heaps. This is only exported to enable tests in HeapTest.cpp.
-PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object)
+PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object)
{
Address address = reinterpret_cast<Address>(const_cast<void*>(object));
- BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize);
+ BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) + WTF::kSystemPageSize);
ASSERT(page->contains(address));
return page;
}
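The arithmetic here leans on two alignment facts: objects live inside 2^17-byte-aligned blink pages, and the page header starts one OS page past the page base (per the comment above; presumably because the first OS page of each region is reserved, e.g. as a guard page — an assumption, not stated in this diff). A sketch of the masking step, assuming blinkPageSize is a power of two:

    // Sketch of blinkPageAddress(): mask an interior pointer down to the
    // base of its 2^17-byte blink page.
    Address blinkPageAddressSketch(Address address)
    {
        return reinterpret_cast<Address>(
            reinterpret_cast<uintptr_t>(address) & ~static_cast<uintptr_t>(blinkPageSize - 1));
    }
    // pageFromObject() then adds WTF::kSystemPageSize to land on the
    // BasePage header one OS page past that base.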
@@ -858,8 +858,8 @@ public:
static void doShutdown();
#if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
- static BaseHeapPage* findPageFromAddress(Address);
- static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); }
+ static BasePage* findPageFromAddress(Address);
+ static BasePage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); }
static bool containedInHeapOrOrphanedPage(void*);
#endif
@@ -876,7 +876,7 @@ public:
{
static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
#if ENABLE(OILPAN)
- BaseHeapPage* page = pageFromObject(objectPointer);
+ BasePage* page = pageFromObject(objectPointer);
if (page->hasBeenSwept())
return false;
ASSERT(page->heap()->threadState()->isSweepingInProgress());
@@ -979,7 +979,7 @@ public:
// This look-up uses the region search tree and a negative contains cache to
// provide an efficient mapping from arbitrary addresses to the containing
// heap-page if one exists.
- static BaseHeapPage* lookup(Address);
+ static BasePage* lookup(Address);
static void addPageMemoryRegion(PageMemoryRegion*);
static void removePageMemoryRegion(PageMemoryRegion*);
@@ -1268,9 +1268,9 @@ size_t HeapObjectHeader::size() const
size_t result = m_encoded & headerSizeMask;
// Large objects should not refer to header->size().
// The actual size of a large object is stored in
- // LargeObject::m_payloadSize.
+ // LargeObjectPage::m_payloadSize.
ASSERT(result != largeObjectSizeInHeader);
- ASSERT(!pageFromObject(this)->isLargeObject());
+ ASSERT(!pageFromObject(this)->isLargeObjectPage());
return result;
}
@@ -1295,10 +1295,10 @@ size_t HeapObjectHeader::payloadSize()
{
size_t size = m_encoded & headerSizeMask;
if (UNLIKELY(size == largeObjectSizeInHeader)) {
- ASSERT(pageFromObject(this)->isLargeObject());
- return static_cast<LargeObject*>(pageFromObject(this))->payloadSize();
+ ASSERT(pageFromObject(this)->isLargeObjectPage());
+ return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize();
}
- ASSERT(!pageFromObject(this)->isLargeObject());
+ ASSERT(!pageFromObject(this)->isLargeObjectPage());
return size - sizeof(HeapObjectHeader);
}
@@ -1340,7 +1340,7 @@ void HeapObjectHeader::markDead()
m_encoded |= headerDeadBitMask;
}
-size_t ThreadHeap::allocationSizeFromSize(size_t size)
+size_t BaseHeap::allocationSizeFromSize(size_t size)
{
// Check the size before computing the actual allocation size. The
// allocation size calculation can overflow for large sizes and the check
@@ -1354,7 +1354,7 @@ size_t ThreadHeap::allocationSizeFromSize(size_t size)
return allocationSize;
}
-Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex)
+Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
{
#if ENABLE(GC_PROFILING)
m_cumulativeAllocationSize += allocationSize;
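The wrap-around hazard described in allocationSizeFromSize()'s comment above is why its bounds check precedes the arithmetic: rounding a huge size up first could overflow to a small value that then passes every later check. A sketch of that structure — the constant names and rounding details are assumptions, since the body is elided from this diff:

    // Sketch only: check first, compute second, so the addition cannot
    // have wrapped before it is validated.
    size_t allocationSizeFromSizeSketch(size_t size)
    {
        RELEASE_ASSERT(size < maxHeapObjectSize);                // bounds check BEFORE arithmetic
        size_t allocationSize = size + sizeof(HeapObjectHeader); // room for the header
        allocationSize = (allocationSize + allocationMask) & ~allocationMask; // round up to 8
        return allocationSize;
    }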
@@ -1382,14 +1382,14 @@ Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcIn
return outOfLineAllocate(allocationSize, gcInfoIndex);
}
-Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex)
+Address NormalPageHeap::allocate(size_t size, size_t gcInfoIndex)
{
return allocateObject(allocationSizeFromSize(size), gcInfoIndex);
}
template<typename T>
struct HeapIndexTrait {
- static int index() { return GeneralHeap; };
+ static int index() { return NormalPageHeapIndex; };
};
// FIXME: The forward declaration is layering violation.
@@ -1407,7 +1407,7 @@ Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex
{
ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
ASSERT(state->isAllocationAllowed());
- return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
+ return static_cast<NormalPageHeap*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
}
template<typename T>
@@ -1446,7 +1446,7 @@ public:
static size_t quantizedSize(size_t count)
{
RELEASE_ASSERT(count <= kMaxUnquantizedAllocation / sizeof(T));
- return ThreadHeap::roundedAllocationSize(count * sizeof(T));
+ return BaseHeap::roundedAllocationSize(count * sizeof(T));
}
static const size_t kMaxUnquantizedAllocation = maxHeapObjectSize;
};
@@ -1463,7 +1463,7 @@ public:
static T* allocateVectorBacking(size_t size)
{
size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index();
- return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorBackingHeap, gcInfoIndex));
+ return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, VectorHeapIndex, gcInfoIndex));
}
PLATFORM_EXPORT static void freeVectorBacking(void* address);
PLATFORM_EXPORT static bool expandVectorBacking(void*, size_t);
@@ -1476,7 +1476,7 @@ public:
static T* allocateInlineVectorBacking(size_t size)
{
size_t gcInfoIndex = GCInfoTrait<HeapVectorBacking<T, VectorTraits<T>>>::index();
- return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVectorBackingHeap, gcInfoIndex));
+ return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, InlineVectorHeapIndex, gcInfoIndex));
}
PLATFORM_EXPORT static void freeInlineVectorBacking(void* address);
PLATFORM_EXPORT static bool expandInlineVectorBacking(void*, size_t);
@@ -1491,7 +1491,7 @@ public:
static T* allocateHashTableBacking(size_t size)
{
size_t gcInfoIndex = GCInfoTrait<HeapHashTableBacking<HashTable>>::index();
- return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableBackingHeap, gcInfoIndex));
+ return reinterpret_cast<T*>(Heap::allocateOnHeapIndex<T>(size, HashTableHeapIndex, gcInfoIndex));
}
template <typename T, typename HashTable>
static T* allocateZeroedHashTableBacking(size_t size)
