Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(847)

Unified Diff: third_party/WebKit/Source/platform/heap/CallbackStack.cpp

Issue 2127453002: Oilpan: Introduce memory pool for CallbackStacks (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: third_party/WebKit/Source/platform/heap/CallbackStack.cpp
diff --git a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
index 7a13b0b32f3a24d94aef0ebf9cbc6767e19d92a7..b1696e2dbc58b9839f59328295a1bda61469f8b0 100644
--- a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
+++ b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
@@ -7,28 +7,74 @@
namespace blink {
-size_t const CallbackStack::kMinimalBlockSize = WTF::kPageAllocationGranularity / sizeof(CallbackStack::Item);
// Returns the process-wide memory pool shared by every CallbackStack.
// The instance is created lazily on first use via a function-local static.
CallbackStackMemoryPool& CallbackStackMemoryPool::instance()
{
    DEFINE_STATIC_LOCAL(CallbackStackMemoryPool, memoryPool, ());
    return memoryPool;
}
+
+void CallbackStackMemoryPool::initialize()
+{
+ MutexLocker locker(m_mutex);
sof 2016/07/05 14:19:02 Do you need to take this lock?
+ m_freeListFirst = 0;
+ for (size_t index = 0; index < kPooledBlockCount - 1; ++index) {
+ m_freeListNext[index] = index + 1;
+ }
+ m_freeListNext[kPooledBlockCount - 1] = -1;
+ m_pooledMemory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockSize * kPooledBlockCount * sizeof(CallbackStack::Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
+ CHECK(m_pooledMemory);
+}
+
+void CallbackStackMemoryPool::shutdown()
+{
+ MutexLocker locker(m_mutex);
+ WTF::freePages(m_pooledMemory, kBlockSize * kPooledBlockCount * sizeof(CallbackStack::Item));
sof 2016/07/05 14:19:01 Could you tidy up the use of the expressions (kPoo
sof 2016/07/05 14:19:01 clear m_pooledMemory after freeing?
+}
-CallbackStack::Block::Block(Block* next, size_t blockSize)
- : m_blockSize(blockSize)
+CallbackStack::Item* CallbackStackMemoryPool::allocate()
{
- // Allocated block size must be a multiple of WTF::kPageAllocationGranularity.
- ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0);
- m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
- RELEASE_ASSERT(m_buffer);
+ MutexLocker locker(m_mutex);
+ static_assert((kBlockSize * sizeof(CallbackStack::Item)) % WTF::kPageAllocationGranularity == 0, "Allocated block size must be a multiple of WTF::kPageAllocationGranularity");
haraken 2016/07/06 01:58:11 I don't think this assert is needed. So removed.
+ if (m_freeListFirst != -1) {
sof 2016/07/05 14:19:02 (A short comment explaining the two code paths her
+ size_t index = m_freeListFirst;
+ DCHECK(0 <= index && index < CallbackStackMemoryPool::kPooledBlockCount);
+ m_freeListFirst = m_freeListNext[index];
+ m_freeListNext[index] = -1;
+ return m_pooledMemory + kBlockSize * index;
+ }
+ CallbackStack::Item* memory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockSize * sizeof(CallbackStack::Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
+ CHECK(memory);
+ return memory;
+}
+void CallbackStackMemoryPool::free(CallbackStack::Item* memory)
+{
+ MutexLocker locker(m_mutex);
+ int index = (reinterpret_cast<uintptr_t>(memory) - reinterpret_cast<uintptr_t>(m_pooledMemory)) / (kBlockSize * sizeof(CallbackStack::Item));
+ if (index < 0 || static_cast<int>(kPooledBlockCount) <= index) {
+ WTF::freePages(memory, kBlockSize * sizeof(CallbackStack::Item));
+ return;
+ }
+ DCHECK_EQ(m_freeListNext[index], -1);
+ m_freeListNext[index] = m_freeListFirst;
+ m_freeListFirst = index;
+}
+
+CallbackStack::Block::Block(Block* next)
+{
+ m_buffer = CallbackStackMemoryPool::instance().allocate();
#if ENABLE(ASSERT)
clear();
#endif
- m_limit = &(m_buffer[m_blockSize]);
+ m_limit = &(m_buffer[CallbackStackMemoryPool::kBlockSize]);
m_current = &(m_buffer[0]);
m_next = next;
}
CallbackStack::Block::~Block()
{
- WTF::freePages(m_buffer, m_blockSize * sizeof(Item));
+ CallbackStackMemoryPool::instance().free(m_buffer);
m_buffer = nullptr;
m_limit = nullptr;
m_current = nullptr;
@@ -38,26 +84,11 @@ CallbackStack::Block::~Block()
#if ENABLE(ASSERT)
// Debug-only: scrub every slot in the block so stale entries are
// detectable as null callbacks.
void CallbackStack::Block::clear()
{
    Item* end = m_buffer + CallbackStackMemoryPool::kBlockSize;
    for (Item* item = m_buffer; item != end; ++item)
        *item = Item(0, 0);
}
#endif
-void CallbackStack::Block::decommit()
-{
- reset();
- WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item));
-}
-
-void CallbackStack::Block::reset()
-{
-#if ENABLE(ASSERT)
- clear();
-#endif
- m_current = &m_buffer[0];
- m_next = nullptr;
-}
-
void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor)
{
// This loop can tolerate entries being added by the callbacks after
@@ -80,57 +111,56 @@ bool CallbackStack::Block::hasCallbackForObject(const void* object)
}
#endif
-CallbackStack::CallbackStack(size_t blockSize)
- : m_first(new Block(nullptr, blockSize))
- , m_last(m_first)
+CallbackStack::CallbackStack()
+ : m_first(nullptr)
+ , m_last(nullptr)
{
}
CallbackStack::~CallbackStack()
{
    // NOTE(review): isEmpty() also passes for a committed stack whose single
    // block holds no entries — that block would be leaked here because only
    // the pointers are cleared. Confirm decommit() always runs first.
    CHECK(isEmpty());
    m_last = nullptr;
    m_first = nullptr;
}
-void CallbackStack::clear()
+void CallbackStack::commit()
{
- Block* next;
- for (Block* current = m_first->next(); current; current = next) {
- next = current->next();
- delete current;
- }
- m_first->reset();
+ DCHECK(!m_first);
+ m_first = new Block(m_first);
m_last = m_first;
}
void CallbackStack::decommit()
{
+ if (!m_first)
+ return;
Block* next;
for (Block* current = m_first->next(); current; current = next) {
next = current->next();
delete current;
}
- m_first->decommit();
- m_last = m_first;
+ delete m_first;
+ m_last = m_first = nullptr;
}
bool CallbackStack::isEmpty() const
{
- return hasJustOneBlock() && m_first->isEmptyBlock();
+ return !m_first || (hasJustOneBlock() && m_first->isEmptyBlock());
}
// Slow path of entry allocation: the head block is full, so chain a fresh
// block in front of it and allocate from that.
CallbackStack::Item* CallbackStack::allocateEntrySlow()
{
    DCHECK(m_first);
    DCHECK(!m_first->allocateEntry());
    Block* block = new Block(m_first);
    m_first = block;
    return block->allocateEntry();
}
CallbackStack::Item* CallbackStack::popSlow()
{
- ASSERT(m_first->isEmptyBlock());
+ DCHECK(m_first);
+ DCHECK(m_first->isEmptyBlock());
for (;;) {
Block* next = m_first->next();
@@ -169,7 +199,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
{
if (from == upto)
return;
- ASSERT(from);
+ DCHECK(from);
// Recurse first so we get to the newly added entries last.
invokeOldestCallbacks(from->next(), upto, visitor);
from->invokeEphemeronCallbacks(visitor);
@@ -177,6 +207,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
// True when the (committed) stack consists of exactly its head block.
bool CallbackStack::hasJustOneBlock() const
{
    DCHECK(m_first);
    return m_first->next() == nullptr;
}

Powered by Google App Engine
This is Rietveld 408576698