Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(992)

Unified Diff: third_party/WebKit/Source/platform/heap/CallbackStack.cpp

Issue 2127453002: Oilpan: Introduce memory pool for CallbackStacks (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: temp Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: third_party/WebKit/Source/platform/heap/CallbackStack.cpp
diff --git a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
index 7a13b0b32f3a24d94aef0ebf9cbc6767e19d92a7..fa35b6a530ec0a7929624a7842a84f4f524979b7 100644
--- a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
+++ b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
@@ -3,32 +3,82 @@
// found in the LICENSE file.
#include "platform/heap/CallbackStack.h"
+#include "wtf/PtrUtil.h"
#include "wtf/allocator/PageAllocator.h"
namespace blink {
-size_t const CallbackStack::kMinimalBlockSize = WTF::kPageAllocationGranularity / sizeof(CallbackStack::Item);
haraken 2016/07/06 02:01:38 I noticed that WTF::kPageAllocationGranularity is
sof 2016/07/06 06:03:21 I wouldn't worry too much -- the non-main thread t
+// Returns the process-wide CallbackStackMemoryPool singleton.
+// DEFINE_STATIC_LOCAL gives lazily-initialized, intentionally-leaked
+// storage. NOTE(review): presumably first touched on a known thread
+// before concurrent use -- confirm against callers.
+CallbackStackMemoryPool& CallbackStackMemoryPool::instance()
+{
+ DEFINE_STATIC_LOCAL(CallbackStackMemoryPool, memoryPool, ());
+ return memoryPool;
+}
+
+// Reserves one contiguous, page-backed region holding kPooledBlockCount
+// blocks and threads every block onto an index-based free list
+// (m_freeListNext[i] holds the next free block's index; -1 terminates).
+void CallbackStackMemoryPool::initialize()
+{
+ m_freeListFirst = 0;
+ // Chain block i -> i + 1; the last block ends the list with -1.
+ for (size_t index = 0; index < kPooledBlockCount - 1; ++index) {
+ m_freeListNext[index] = index + 1;
+ }
+ m_freeListNext[kPooledBlockCount - 1] = -1;
+ // One allocation for the whole pool; failure here is unrecoverable OOM.
+ m_pooledMemory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockBytes * kPooledBlockCount, WTF::kPageAllocationGranularity, WTF::PageAccessible));
+ CHECK(m_pooledMemory);
+}
+
+// Releases the entire pooled region back to the OS. initialize() must be
+// called again before the pool can be reused.
+// NOTE(review): assumes every pooled block handed out by allocate() has
+// already been returned via free() -- confirm against callers.
+void CallbackStackMemoryPool::shutdown()
+{
+ WTF::freePages(m_pooledMemory, kBlockBytes * kPooledBlockCount);
+ m_pooledMemory = nullptr;
+ m_freeListFirst = 0;
+}
+
+// Hands out one block of CallbackStack::Items. Pooled blocks are reused
+// via the free list; once the pool is exhausted, a fresh region is
+// allocated directly and later recognized (and released) by free().
+CallbackStack::Item* CallbackStackMemoryPool::allocate()
+{
+ MutexLocker locker(m_mutex);
+ // Allocate from a free list if available.
+ if (m_freeListFirst != -1) {
+ size_t index = m_freeListFirst;
+ // |index| is unsigned, so "0 <= index" would be vacuously true;
+ // only the upper bound needs checking.
+ DCHECK_LT(index, CallbackStackMemoryPool::kPooledBlockCount);
+ m_freeListFirst = m_freeListNext[index];
+ m_freeListNext[index] = -1;
+ return m_pooledMemory + kBlockSize * index;
+ }
+ // Otherwise, allocate a new memory region.
+ CallbackStack::Item* memory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockBytes, WTF::kPageAllocationGranularity, WTF::PageAccessible));
sof 2016/07/06 13:22:17 The block size/length isn't a multiple of kPageAll
haraken 2016/07/06 13:27:29 On Windows, kPageAllocationGranularity is 64 KB. D
sof 2016/07/06 13:33:07 If you want to use allocPages().. but why insist o
+ CHECK(memory);
+ return memory;
+}
-CallbackStack::Block::Block(Block* next, size_t blockSize)
- : m_blockSize(blockSize)
+// Returns |memory| to the pool's free list when it belongs to the pooled
+// region, or releases it to the OS when it came from allocate()'s
+// fallback page allocation.
+void CallbackStackMemoryPool::free(CallbackStack::Item* memory)
{
- // Allocated block size must be a multiple of WTF::kPageAllocationGranularity.
- ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0);
- m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
- RELEASE_ASSERT(m_buffer);
+ MutexLocker locker(m_mutex);
+ // Classify by pointer range before deriving an index: for an address
+ // below m_pooledMemory the uintptr_t subtraction wraps, and the wrapped
+ // quotient is not guaranteed to narrow to a negative int, so an
+ // "index < 0" test could misclassify an off-pool block as pooled.
+ if (memory < m_pooledMemory || m_pooledMemory + kBlockSize * kPooledBlockCount <= memory) {
+ // A newly allocated (non-pooled) region; free the memory.
+ WTF::freePages(memory, kBlockBytes);
+ return;
+ }
+ // Otherwise, return the memory back to the free list.
+ int index = static_cast<int>((reinterpret_cast<uintptr_t>(memory) - reinterpret_cast<uintptr_t>(m_pooledMemory)) / (kBlockSize * sizeof(CallbackStack::Item)));
+ DCHECK_EQ(m_freeListNext[index], -1);
+ m_freeListNext[index] = m_freeListFirst;
+ m_freeListFirst = index;
+}
+// A Block is one fixed-size chunk of Items drawn from the shared memory
+// pool; blocks form a singly linked list where |next| is the older block.
+CallbackStack::Block::Block(Block* next)
+{
+ m_buffer = CallbackStackMemoryPool::instance().allocate();
#if ENABLE(ASSERT)
 clear();
#endif
+ // Every block now has the pool's fixed kBlockSize capacity; the old
+ // per-stack blockSize parameter is gone.
+ m_limit = &(m_buffer[CallbackStackMemoryPool::kBlockSize]);
 m_current = &(m_buffer[0]);
 m_next = next;
}
CallbackStack::Block::~Block()
{
- WTF::freePages(m_buffer, m_blockSize * sizeof(Item));
+ CallbackStackMemoryPool::instance().free(m_buffer);
m_buffer = nullptr;
m_limit = nullptr;
m_current = nullptr;
@@ -38,26 +88,11 @@ CallbackStack::Block::~Block()
#if ENABLE(ASSERT)
// Debug-only: scrub every Item slot so use of stale entries is detectable.
void CallbackStack::Block::clear()
{
- for (size_t i = 0; i < m_blockSize; i++)
+ for (size_t i = 0; i < CallbackStackMemoryPool::kBlockSize; i++)
 m_buffer[i] = Item(0, 0);
}
#endif
-void CallbackStack::Block::decommit()
-{
- reset();
- WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item));
-}
-
-void CallbackStack::Block::reset()
-{
-#if ENABLE(ASSERT)
- clear();
-#endif
- m_current = &m_buffer[0];
- m_next = nullptr;
-}
-
void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor)
{
// This loop can tolerate entries being added by the callbacks after
@@ -80,57 +115,61 @@ bool CallbackStack::Block::hasCallbackForObject(const void* object)
}
#endif
-CallbackStack::CallbackStack(size_t blockSize)
- : m_first(new Block(nullptr, blockSize))
- , m_last(m_first)
+// Factory: heap-allocates an empty (uncommitted) CallbackStack and hands
+// ownership to the caller.
+std::unique_ptr<CallbackStack> CallbackStack::create()
+{
+ return wrapUnique(new CallbackStack());
+}
+
+// Starts with no blocks at all; commit() allocates the first block before
+// the stack is used.
+CallbackStack::CallbackStack()
+ : m_first(nullptr)
+ , m_last(nullptr)
{
}
CallbackStack::~CallbackStack()
{
- RELEASE_ASSERT(isEmpty());
- delete m_first;
+ // NOTE(review): an empty-but-still-committed block is not deleted here
+ // (only the pointers are nulled); callers appear responsible for calling
+ // decommit() before destruction -- confirm against call sites.
+ CHECK(isEmpty());
 m_first = nullptr;
 m_last = nullptr;
}
-void CallbackStack::clear()
+// Allocates the initial block from the pool. Must only be called on an
+// uncommitted stack (m_first is null).
+void CallbackStack::commit()
{
- Block* next;
- for (Block* current = m_first->next(); current; current = next) {
- next = current->next();
- delete current;
- }
- m_first->reset();
+ DCHECK(!m_first);
+ // m_first is null here (see DCHECK), so the new block has no successor.
+ m_first = new Block(m_first);
 m_last = m_first;
}
// Deletes every block (returning their buffers to the pool) and leaves the
// stack in the uncommitted state. Safe to call on an uncommitted stack.
void CallbackStack::decommit()
{
+ if (!m_first)
+ return;
 Block* next;
 for (Block* current = m_first->next(); current; current = next) {
 next = current->next();
 delete current;
 }
- m_first->decommit();
- m_last = m_first;
+ delete m_first;
+ m_last = m_first = nullptr;
}
// An uncommitted stack (no blocks) counts as empty, as does a single
// block holding no entries.
bool CallbackStack::isEmpty() const
{
- return hasJustOneBlock() && m_first->isEmptyBlock();
+ return !m_first || (hasJustOneBlock() && m_first->isEmptyBlock());
}
// Slow path of entry allocation: the current head block is full, so chain
// a fresh pool block in front of it and allocate from that.
// Requires a committed stack (m_first non-null).
CallbackStack::Item* CallbackStack::allocateEntrySlow()
{
- ASSERT(!m_first->allocateEntry());
- m_first = new Block(m_first, m_first->blockSize());
+ DCHECK(m_first);
+ DCHECK(!m_first->allocateEntry());
+ m_first = new Block(m_first);
 return m_first->allocateEntry();
}
CallbackStack::Item* CallbackStack::popSlow()
{
- ASSERT(m_first->isEmptyBlock());
+ DCHECK(m_first);
+ DCHECK(m_first->isEmptyBlock());
for (;;) {
Block* next = m_first->next();
@@ -169,7 +208,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
{
if (from == upto)
return;
- ASSERT(from);
+ DCHECK(from);
// Recurse first so we get to the newly added entries last.
invokeOldestCallbacks(from->next(), upto, visitor);
from->invokeEphemeronCallbacks(visitor);
@@ -177,6 +216,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
// Precondition: the stack is committed; an uncommitted stack must be
// checked via isEmpty() instead.
bool CallbackStack::hasJustOneBlock() const
{
+ DCHECK(m_first);
 return !m_first->next();
}
« no previous file with comments | « third_party/WebKit/Source/platform/heap/CallbackStack.h ('k') | third_party/WebKit/Source/platform/heap/Heap.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698