| Index: third_party/WebKit/Source/platform/heap/CallbackStack.cpp
|
| diff --git a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
|
| index 7a13b0b32f3a24d94aef0ebf9cbc6767e19d92a7..9a25d1b5116c30380f54fd4583631aa72f4f6981 100644
|
| --- a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
|
| +++ b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
|
| @@ -3,32 +3,83 @@
|
| // found in the LICENSE file.
|
|
|
| #include "platform/heap/CallbackStack.h"
|
| +#include "wtf/PtrUtil.h"
|
| #include "wtf/allocator/PageAllocator.h"
|
| +#include "wtf/allocator/Partitions.h"
|
|
|
| namespace blink {
|
|
|
| -size_t const CallbackStack::kMinimalBlockSize = WTF::kPageAllocationGranularity / sizeof(CallbackStack::Item);
|
| +CallbackStackMemoryPool& CallbackStackMemoryPool::instance()
|
| +{
|
| + DEFINE_STATIC_LOCAL(CallbackStackMemoryPool, memoryPool, ());
|
| + return memoryPool;
|
| +}
|
| +
|
| +void CallbackStackMemoryPool::initialize()
|
| +{
|
| + m_freeListFirst = 0;
|
| + for (size_t index = 0; index < kPooledBlockCount - 1; ++index) {
|
| + m_freeListNext[index] = index + 1;
|
| + }
|
| + m_freeListNext[kPooledBlockCount - 1] = -1;
|
| + m_pooledMemory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockBytes * kPooledBlockCount, WTF::kPageAllocationGranularity, WTF::PageAccessible));
|
| + CHECK(m_pooledMemory);
|
| +}
|
| +
|
| +void CallbackStackMemoryPool::shutdown()
|
| +{
|
| + WTF::freePages(m_pooledMemory, kBlockBytes * kPooledBlockCount);
|
| + m_pooledMemory = nullptr;
|
| + m_freeListFirst = 0;
|
| +}
|
| +
|
| +CallbackStack::Item* CallbackStackMemoryPool::allocate()
|
| +{
|
| + MutexLocker locker(m_mutex);
|
| + // Allocate from a free list if available.
|
| + if (m_freeListFirst != -1) {
|
| + size_t index = m_freeListFirst;
|
| + DCHECK_LT(index, CallbackStackMemoryPool::kPooledBlockCount);
|
| + m_freeListFirst = m_freeListNext[index];
|
| + m_freeListNext[index] = -1;
|
| + return m_pooledMemory + kBlockSize * index;
|
| + }
|
| + // Otherwise, allocate a new memory region.
|
| + CallbackStack::Item* memory = static_cast<CallbackStack::Item*>(WTF::Partitions::fastZeroedMalloc(kBlockBytes, "CallbackStackMemoryPool"));
|
| + CHECK(memory);
|
| + return memory;
|
| +}
|
|
|
| -CallbackStack::Block::Block(Block* next, size_t blockSize)
|
| - : m_blockSize(blockSize)
|
| +void CallbackStackMemoryPool::free(CallbackStack::Item* memory)
|
| {
|
| - // Allocated block size must be a multiple of WTF::kPageAllocationGranularity.
|
| - ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0);
|
| - m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
|
| - RELEASE_ASSERT(m_buffer);
|
| + MutexLocker locker(m_mutex);
|
| + int index = static_cast<int>((reinterpret_cast<intptr_t>(memory) - reinterpret_cast<intptr_t>(m_pooledMemory)) / static_cast<intptr_t>(kBlockSize * sizeof(CallbackStack::Item)));
|
| + // If the memory is a newly allocated region, free the memory.
|
| + if (index < 0 || static_cast<int>(kPooledBlockCount) <= index) {
|
| + WTF::Partitions::fastFree(memory);
|
| + return;
|
| + }
|
| + // Otherwise, return the memory back to the free list.
|
| + DCHECK_EQ(m_freeListNext[index], -1);
|
| + m_freeListNext[index] = m_freeListFirst;
|
| + m_freeListFirst = index;
|
| +}
|
|
|
| +CallbackStack::Block::Block(Block* next)
|
| +{
|
| + m_buffer = CallbackStackMemoryPool::instance().allocate();
|
| #if ENABLE(ASSERT)
|
| clear();
|
| #endif
|
|
|
| - m_limit = &(m_buffer[m_blockSize]);
|
| + m_limit = &(m_buffer[CallbackStackMemoryPool::kBlockSize]);
|
| m_current = &(m_buffer[0]);
|
| m_next = next;
|
| }
|
|
|
| CallbackStack::Block::~Block()
|
| {
|
| - WTF::freePages(m_buffer, m_blockSize * sizeof(Item));
|
| + CallbackStackMemoryPool::instance().free(m_buffer);
|
| m_buffer = nullptr;
|
| m_limit = nullptr;
|
| m_current = nullptr;
|
| @@ -38,26 +89,11 @@ CallbackStack::Block::~Block()
|
| #if ENABLE(ASSERT)
|
| void CallbackStack::Block::clear()
|
| {
|
| - for (size_t i = 0; i < m_blockSize; i++)
|
| + for (size_t i = 0; i < CallbackStackMemoryPool::kBlockSize; i++)
|
| m_buffer[i] = Item(0, 0);
|
| }
|
| #endif
|
|
|
| -void CallbackStack::Block::decommit()
|
| -{
|
| - reset();
|
| - WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item));
|
| -}
|
| -
|
| -void CallbackStack::Block::reset()
|
| -{
|
| -#if ENABLE(ASSERT)
|
| - clear();
|
| -#endif
|
| - m_current = &m_buffer[0];
|
| - m_next = nullptr;
|
| -}
|
| -
|
| void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor)
|
| {
|
| // This loop can tolerate entries being added by the callbacks after
|
| @@ -80,57 +116,61 @@ bool CallbackStack::Block::hasCallbackForObject(const void* object)
|
| }
|
| #endif
|
|
|
| -CallbackStack::CallbackStack(size_t blockSize)
|
| - : m_first(new Block(nullptr, blockSize))
|
| - , m_last(m_first)
|
| +std::unique_ptr<CallbackStack> CallbackStack::create()
|
| +{
|
| + return wrapUnique(new CallbackStack());
|
| +}
|
| +
|
| +CallbackStack::CallbackStack()
|
| + : m_first(nullptr)
|
| + , m_last(nullptr)
|
| {
|
| }
|
|
|
| CallbackStack::~CallbackStack()
|
| {
|
| - RELEASE_ASSERT(isEmpty());
|
| - delete m_first;
|
| + CHECK(isEmpty());
|
| m_first = nullptr;
|
| m_last = nullptr;
|
| }
|
|
|
| -void CallbackStack::clear()
|
| +void CallbackStack::commit()
|
| {
|
| - Block* next;
|
| - for (Block* current = m_first->next(); current; current = next) {
|
| - next = current->next();
|
| - delete current;
|
| - }
|
| - m_first->reset();
|
| + DCHECK(!m_first);
|
| + m_first = new Block(nullptr);
|
| m_last = m_first;
|
| }
|
|
|
| void CallbackStack::decommit()
|
| {
|
| + if (!m_first)
|
| + return;
|
| Block* next;
|
| for (Block* current = m_first->next(); current; current = next) {
|
| next = current->next();
|
| delete current;
|
| }
|
| - m_first->decommit();
|
| - m_last = m_first;
|
| + delete m_first;
|
| + m_last = m_first = nullptr;
|
| }
|
|
|
| bool CallbackStack::isEmpty() const
|
| {
|
| - return hasJustOneBlock() && m_first->isEmptyBlock();
|
| + return !m_first || (hasJustOneBlock() && m_first->isEmptyBlock());
|
| }
|
|
|
| CallbackStack::Item* CallbackStack::allocateEntrySlow()
|
| {
|
| - ASSERT(!m_first->allocateEntry());
|
| - m_first = new Block(m_first, m_first->blockSize());
|
| + DCHECK(m_first);
|
| + DCHECK(!m_first->allocateEntry());
|
| + m_first = new Block(m_first);
|
| return m_first->allocateEntry();
|
| }
|
|
|
| CallbackStack::Item* CallbackStack::popSlow()
|
| {
|
| - ASSERT(m_first->isEmptyBlock());
|
| + DCHECK(m_first);
|
| + DCHECK(m_first->isEmptyBlock());
|
|
|
| for (;;) {
|
| Block* next = m_first->next();
|
| @@ -169,7 +209,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
|
| {
|
| if (from == upto)
|
| return;
|
| - ASSERT(from);
|
| + DCHECK(from);
|
| // Recurse first so we get to the newly added entries last.
|
| invokeOldestCallbacks(from->next(), upto, visitor);
|
| from->invokeEphemeronCallbacks(visitor);
|
| @@ -177,6 +217,7 @@ void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* vis
|
|
|
| bool CallbackStack::hasJustOneBlock() const
|
| {
|
| + DCHECK(m_first);
|
| return !m_first->next();
|
| }
|
|
|
|
|