Chromium Code Reviews
| Index: Source/platform/heap/Heap.cpp |
| diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp |
| index 9dddb4a39bcc2d0e7c440d37ba35ea3befd19fbe..afb8672b2a5c5629c81933999798d7e6ac31c186 100644 |
| --- a/Source/platform/heap/Heap.cpp |
| +++ b/Source/platform/heap/Heap.cpp |
| @@ -125,7 +125,6 @@ public: |
| return m_base <= addr && addr < (m_base + m_size); |
| } |
| - |
| bool contains(const MemoryRegion& other) const |
| { |
| return contains(other.m_base) && contains(other.m_base + other.m_size - 1); |
| @@ -187,10 +186,24 @@ public: |
| release(); |
| } |
| - void pageRemoved() |
| + void pageDeleted(Address page) |
| { |
| - if (!--m_numPages) |
| + decommitPage(page); |
|
Mads Ager (chromium)
2014/10/01 11:29:01
Can we use a name other than decommit/commit here?
zerny-chromium
2014/10/01 12:05:34
Done.
|
| + if (!--m_numPages) { |
| + Heap::removePageMemoryRegion(this); |
| delete this; |
| + } |
| + } |
| + |
| + void commitPage(Address page) |
| + { |
| + ASSERT(!m_committed[index(page)]); |
| + m_committed[index(page)] = true; |
| + } |
| + |
| + void decommitPage(Address page) |
| + { |
| + m_committed[index(page)] = false; |
| } |
| static PageMemoryRegion* allocate(size_t size, unsigned numPages) |
| @@ -274,13 +287,36 @@ public: |
| #endif |
| } |
| + BaseHeapPage* pageFromAddress(Address address) |
| + { |
| + ASSERT(contains(address)); |
| + if (!m_committed[index(address)]) |
| + return 0; |
| + if (m_isLargePage) |
| + return pageHeaderFromObject(base()); |
| + return pageHeaderFromObject(address); |
| + } |
| + |
| private: |
| PageMemoryRegion(Address base, size_t size, unsigned numPages) |
| : MemoryRegion(base, size) |
| + , m_isLargePage(numPages == 1) |
| , m_numPages(numPages) |
| { |
| + for (size_t i = 0; i < blinkPagesPerRegion; ++i) |
| + m_committed[i] = false; |
| + } |
| + |
| + unsigned index(Address address) |
| + { |
| + ASSERT(contains(address)); |
| + if (m_isLargePage) |
| + return 0; |
| + return (address - base()) / blinkPageSize; |
|
zerny-chromium
2014/10/01 10:34:59
This was incorrect. Forgot to add the guard page t
zerny-chromium
2014/10/01 11:06:33
No, it is correct. The first guard page is part of
|
| } |
| + bool m_isLargePage; |
| + bool m_committed[blinkPagesPerRegion]; |
|
zerny-chromium
2014/10/01 09:28:54
Regarding PageMemory state in the region, it seems
Mads Ager (chromium)
2014/10/01 11:29:01
Should we use slightly different terminology here?
zerny-chromium
2014/10/01 12:05:34
Done.
|
| unsigned m_numPages; |
| }; |
| @@ -304,11 +340,22 @@ public: |
| ~PageMemory() |
| { |
| __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| - m_reserved->pageRemoved(); |
| + m_reserved->pageDeleted(writableStart()); |
| } |
| - bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } |
| - void decommit() { m_writable.decommit(); } |
| + bool commit() WARN_UNUSED_RETURN |
| + { |
| + m_reserved->commitPage(writableStart()); |
| + return m_writable.commit(); |
| + } |
| + |
| + void decommit() |
| + { |
| + m_reserved->decommitPage(writableStart()); |
| + m_writable.decommit(); |
| + } |
| + |
| + PageMemoryRegion* region() { return m_reserved; } |
| Address writableStart() { return m_writable.base(); } |
| @@ -609,7 +656,6 @@ template<typename Header> |
| void ThreadHeap<Header>::cleanupPages() |
| { |
| clearFreeLists(); |
| - flushHeapContainsCache(); |
| // Add the ThreadHeap's pages to the orphanedPagePool. |
| for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) |
| @@ -925,6 +971,7 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf |
| threadState()->setGCRequested(); |
| Heap::flushHeapDoesNotContainCache(); |
| PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| + m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
| Address largeObjectAddress = pageMemory->writableStart(); |
| Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>(); |
| memset(headerAddress, 0, size); |
| @@ -945,7 +992,6 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf |
| template<typename Header> |
| void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
| { |
| - flushHeapContainsCache(); |
| object->unlink(previousNext); |
| object->finalize(); |
| @@ -1149,7 +1195,6 @@ template <typename Header> |
| void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
| { |
| MutexLocker locker(m_threadState->sweepMutex()); |
| - flushHeapContainsCache(); |
| if (page->terminating()) { |
| // The thread is shutting down so this page is being removed as part |
| // of a thread local GC. In that case the page could be accessed in the |
| @@ -1183,6 +1228,7 @@ void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| // [ guard os page | ... payload ... | guard os page ] |
| // ^---{ aligned to blink page size } |
| PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| + m_threadState->allocatedRegionsSinceLastGC().append(region); |
| // Setup the PageMemory object for each of the pages in the |
| // region. |
| size_t offset = 0; |
| @@ -1749,18 +1795,16 @@ void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotI |
| } |
| #endif |
| -template<typename Entry> |
| -void HeapExtentCache<Entry>::flush() |
| +void HeapDoesNotContainCache::flush() |
| { |
| if (m_hasEntries) { |
| for (int i = 0; i < numberOfEntries; i++) |
| - m_entries[i] = Entry(); |
| + m_entries[i] = 0; |
| m_hasEntries = false; |
| } |
| } |
| -template<typename Entry> |
| -size_t HeapExtentCache<Entry>::hash(Address address) |
| +size_t HeapDoesNotContainCache::hash(Address address) |
| { |
| size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| value ^= value >> numberOfEntriesLog2; |
| @@ -1769,41 +1813,26 @@ size_t HeapExtentCache<Entry>::hash(Address address) |
| return value & ~1; // Returns only even number. |
| } |
| -template<typename Entry> |
| -typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address) |
| +bool HeapDoesNotContainCache::lookup(Address address) |
| { |
| size_t index = hash(address); |
| ASSERT(!(index & 1)); |
| Address cachePage = roundToBlinkPageStart(address); |
| - if (m_entries[index].address() == cachePage) |
| - return m_entries[index].result(); |
| - if (m_entries[index + 1].address() == cachePage) |
| - return m_entries[index + 1].result(); |
| + if (m_entries[index] == cachePage) |
| + return m_entries[index]; |
| + if (m_entries[index + 1] == cachePage) |
| + return m_entries[index + 1]; |
| return 0; |
| } |
| -template<typename Entry> |
| -void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry) |
| +void HeapDoesNotContainCache::addEntry(Address address) |
| { |
| m_hasEntries = true; |
| size_t index = hash(address); |
| ASSERT(!(index & 1)); |
| Address cachePage = roundToBlinkPageStart(address); |
| m_entries[index + 1] = m_entries[index]; |
| - m_entries[index] = Entry(cachePage, entry); |
| -} |
| - |
| -// These should not be needed, but it seems impossible to persuade clang to |
| -// instantiate the template functions and export them from a shared library, so |
| -// we add these in the non-templated subclass, which does not have that issue. |
| -void HeapContainsCache::addEntry(Address address, BaseHeapPage* page) |
| -{ |
| - HeapExtentCache<PositiveEntry>::addEntry(address, page); |
| -} |
| - |
| -BaseHeapPage* HeapContainsCache::lookup(Address address) |
| -{ |
| - return HeapExtentCache<PositiveEntry>::lookup(address); |
| + m_entries[index] = cachePage; |
| } |
| void Heap::flushHeapDoesNotContainCache() |
| @@ -2106,6 +2135,8 @@ void Heap::doShutdown() |
| s_markingStack = 0; |
| delete s_ephemeronStack; |
| s_ephemeronStack = 0; |
| + delete s_regionTree; |
| + s_regionTree = 0; |
| ThreadState::shutdown(); |
| } |
| @@ -2137,22 +2168,21 @@ Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| return 0; |
| #endif |
| - ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| - for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| - if ((*it)->checkAndMarkPointer(visitor, address)) { |
| - // Pointer was in a page of that thread. If it actually pointed |
| - // into an object then that object was found and marked. |
| - ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| - s_lastGCWasConservative = true; |
| - return address; |
| - } |
| + if (BaseHeapPage* page = lookup(address)) { |
| + ASSERT(page->contains(address)); |
| + ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| + // TODO: What if the thread owning this page is terminating? |
|
zerny-chromium
2014/10/01 09:28:54
This issue changes the objects which might be mark
Mads Ager (chromium)
2014/10/01 11:29:01
My gut reaction is that it would be best to filter
zerny-chromium
2014/10/01 12:05:34
As discussed offline, we will mark the pointer on
|
| + page->checkAndMarkPointer(visitor, address); |
| + // TODO: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer. |
| + s_lastGCWasConservative = true; |
| + return address; |
| } |
| #if !ENABLE(ASSERT) |
| - s_heapDoesNotContainCache->addEntry(address, true); |
| + s_heapDoesNotContainCache->addEntry(address); |
| #else |
| if (!s_heapDoesNotContainCache->lookup(address)) |
| - s_heapDoesNotContainCache->addEntry(address, true); |
| + s_heapDoesNotContainCache->addEntry(address); |
| #endif |
| return 0; |
| } |
| @@ -2710,6 +2740,81 @@ void HeapAllocator::backingFree(void* address) |
| heap->promptlyFreeObject(header); |
| } |
| +BaseHeapPage* Heap::lookup(Address address) |
| +{ |
| + if (!s_regionTree) |
| + return 0; |
| + if (PageMemoryRegion* region = s_regionTree->lookup(address)) |
| + return region->pageFromAddress(address); |
| + return 0; |
| +} |
| + |
| +void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| +{ |
| + // Deletion of large objects (and thus their region) can happen concurrently |
| + // on sweeper threads. Removal can also happen during thread shutdown, but |
| + // that case is safe. Regardless, we make removal mutually exclusive and |
| + // reuse the marking mutex which is not in use during any of the two cases. |
| + MutexLocker locker(markingMutex()); |
|
Mads Ager (chromium)
2014/10/01 11:29:00
Nit: Maybe just introduce a regionTreeMutex that
zerny-chromium
2014/10/01 12:05:34
Done.
|
| + RegionTree::remove(region, &s_regionTree); |
| +} |
| + |
| +void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| +{ |
| + // No locking because regions are only added in ThreadState::prepareForGC which is in a GCScope. |
|
Mads Ager (chromium)
2014/10/01 11:29:01
Can we add an assert here to trigger if we ever at
zerny-chromium
2014/10/01 12:05:34
Done.
|
| + RegionTree::add(new RegionTree(region), &s_regionTree); |
| +} |
| + |
| +PageMemoryRegion* Heap::RegionTree::lookup(Address address) |
| +{ |
| + RegionTree* current = s_regionTree; |
| + while (current) { |
| + Address base = current->m_region->base(); |
| + if (address < base) { |
| + current = current->m_left; |
| + continue; |
| + } |
| + if (address >= base + current->m_region->size()) { |
| + current = current->m_right; |
| + continue; |
| + } |
| + ASSERT(current->m_region->contains(address)); |
| + return current->m_region; |
| + } |
| + return 0; |
| +} |
| + |
| +void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) |
| +{ |
| + if (!newTree) |
|
Mads Ager (chromium)
2014/10/01 11:29:01
ASSERT(newTree) instead? Do we ever actually want
zerny-chromium
2014/10/01 12:05:34
I use the possibility of adding a null in RegionTr
|
| + return; |
| + Address base = newTree->m_region->base(); |
| + for (RegionTree* current = *context; current; current = *context) { |
| + context = (base < current->m_region->base()) ? ¤t->m_left : ¤t->m_right; |
| + } |
| + *context = newTree; |
| +} |
| + |
| +void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context) |
| +{ |
| + ASSERT(region); |
| + ASSERT(context); |
| + Address base = region->base(); |
| + RegionTree* current = *context; |
| + ASSERT(current); |
| + while (region != current->m_region) { |
| + context = (base < current->m_region->base()) ? ¤t->m_left : ¤t->m_right; |
| + current = *context; |
| + ASSERT(current); |
| + } |
| + *context = 0; |
| + add(current->m_left, context); |
| + add(current->m_right, context); |
| + current->m_left = 0; |
| + current->m_right = 0; |
| + delete current; |
| +} |
| + |
| // Force template instantiations for the types that we need. |
| template class HeapPage<FinalizedHeapObjectHeader>; |
| template class HeapPage<HeapObjectHeader>; |
| @@ -2727,4 +2832,6 @@ bool Heap::s_shutdownCalled = false; |
| bool Heap::s_lastGCWasConservative = false; |
| FreePagePool* Heap::s_freePagePool; |
| OrphanedPagePool* Heap::s_orphanedPagePool; |
| +Heap::RegionTree* Heap::s_regionTree = 0; |
| + |
| } |