| Index: Source/platform/heap/BufferAllocator.cpp
|
| diff --git a/Source/platform/heap/BufferAllocator.cpp b/Source/platform/heap/BufferAllocator.cpp
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..e1c57bfd269350c34c6806bf2e109e76bc545774
|
| --- /dev/null
|
| +++ b/Source/platform/heap/BufferAllocator.cpp
|
| @@ -0,0 +1,247 @@
|
| +// Copyright 2015 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "config.h"
|
| +#include "platform/heap/BufferAllocator.h"
|
| +
|
| +#include <stdio.h>
|
| +
|
| +namespace blink {
|
| +
|
| +// Spin lock serializing every backing-store operation below; the buffer
| +// allocator entry points may be reached from multiple threads.
|
| +int BufferAllocator::s_lock = 0;
|
| +// Dedicated ThreadState that owns the buffer heaps; created in
| +// initialize() and destroyed in shutdown().
|
| +ThreadState* BufferAllocator::s_state = nullptr;
|
| +// NOTE(review): flag is defined but not read anywhere in this file —
| +// presumably consulted by the hit-rate accounting elsewhere; confirm.
|
| +bool BufferAllocator::s_ignoreHitRateCount = false;
|
| +
|
| +// Creates the dedicated ThreadState backing the buffer heaps. Must run
| +// once before any backing*()/free*()/expand*()/shrink*() entry point.
|
| +void BufferAllocator::initialize()
|
| +{
|
| +    s_state = new ThreadState(false);
|
| +}
|
| +
|
| +// Tears down the allocator: releases all pages still held by the
| +// dedicated ThreadState and destroys it. Guards against shutdown()
| +// without a prior initialize(), and nulls s_state so a stale pointer
| +// cannot be dereferenced by a late caller.
|
| +void BufferAllocator::shutdown()
|
| +{
|
| +    if (!s_state)
|
| +        return;
|
| +    s_state->cleanupPages();
|
| +    delete s_state;
|
| +    s_state = nullptr;
|
| +}
|
| +
|
| +// Dumps per-heap statistics for every buffer heap index to stderr,
| +// framed by banner lines. Debugging aid only; no state is modified.
|
| +void BufferAllocator::printStats()
|
| +{
|
| +    fprintf(stderr, "==== print stats of buffer allocator ====\n");
|
| +    s_state->heap(ThreadState::Vector1HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::Vector2HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::Vector3HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::Vector4HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::InlineVectorHeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::HashTable1HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::HashTable2HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::HashTable3HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::HashTable4HeapIndex)->printStats();
|
| +    s_state->heap(ThreadState::BufferStringHeapIndex)->printStats();
|
| +    fprintf(stderr, "=========================================\n");
|
| +}
|
| +
|
| +// Promptly frees a buffer backing store. Large-object pages are released
| +// wholesale; normal-page objects are promptly freed in place and their
| +// page released once empty. All work happens under s_lock, released on
| +// every exit path.
|
| +void BufferAllocator::backingFree(void* address)
|
| +{
|
| +    if (!address)
|
| +        return;
|
| +
|
| +    spinLockLock(&s_lock);
|
| +
|
| +    // Don't promptly free large objects because their page is never reused.
|
| +    // Don't free backings allocated on other threads.
|
| +    BasePage* page = pageFromObject(address);
|
| +    if (page->isLargeObjectPage()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "backingFree: large page\n");
|
| +#endif
|
| +
|
| +        // A large object occupies its whole page, so freeing the page
| +        // frees the object.
|
| +        page->heap()->freePage(page);
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return;
|
| +    }
|
| +
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingFree: begin\n");
|
| +#endif
|
| +    HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
|
| +    ASSERT(header->checkHeader());
|
| +    NormalPageHeap* heap = static_cast<NormalPage*>(page)->heapForNormalPage();
|
| +    // Bookkeeping for the promptly-freed object, keyed by gcInfoIndex —
| +    // presumably feeds the hit-rate statistics; confirm against ThreadState.
|
| +    s_state->promptlyFreed(header->gcInfoIndex());
|
| +    heap->promptlyFreeObject(header);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingFree: end\n");
|
| +#endif
|
| +
|
| +    // Releasing the last object may leave the page empty; give it back.
|
| +    if (page->isEmpty())
|
| +        page->heap()->freePage(page);
|
| +
|
| +    spinLockUnlock(&s_lock);
|
| +}
|
| +
|
| +// Attempts to grow a backing store in place to newSize. Returns true on
| +// success; false for a null address, a large-object page (in-place expand
| +// unsupported there), or when the heap cannot expand the object. Runs
| +// entirely under s_lock.
|
| +bool BufferAllocator::backingExpand(void* address, size_t newSize)
|
| +{
|
| +    if (!address)
|
| +        return false;
|
| +
|
| +    spinLockLock(&s_lock);
|
| +
|
| +    // FIXME: Support expand for large objects.
|
| +    // Don't expand backings allocated on other threads.
|
| +    BasePage* page = pageFromObject(address);
|
| +    if (page->isLargeObjectPage()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "backingExpand: large page\n");
|
| +#endif
|
| +        spinLockUnlock(&s_lock);
|
| +        return false;
|
| +    }
|
| +
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingExpand: begin\n");
|
| +#endif
|
| +    HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
|
| +    ASSERT(header->checkHeader());
|
| +    NormalPageHeap* heap = static_cast<NormalPage*>(page)->heapForNormalPage();
|
| +    bool succeed = heap->expandObject(header, newSize);
|
| +    // Expanding may move the heap's allocation point; tell ThreadState.
|
| +    if (succeed)
|
| +        s_state->allocationPointAdjusted(heap->heapIndex());
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingExpand: end\n");
|
| +#endif
|
| +    spinLockUnlock(&s_lock);
|
| +    return succeed;
|
| +}
|
| +
|
| +// Shrinks a backing store from quantizedCurrentSize to
| +// quantizedShrunkSize. Returns true when the shrink was performed or was
| +// deliberately skipped (too-small gain, null address); returns false only
| +// for large-object pages, where in-place shrink is unsupported.
|
| +bool BufferAllocator::backingShrink(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
|
| +{
|
| +    ASSERT(quantizedCurrentSize >= quantizedShrunkSize);
|
| +    // We shrink the object only if the shrinking will make a non-small
|
| +    // prompt-free block.
|
| +    // FIXME: Optimize the threshold size.
|
| +    if (quantizedCurrentSize <= quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32)
|
| +        return true;
|
| +
|
| +    if (!address)
|
| +        return true;
|
| +
|
| +    spinLockLock(&s_lock);
|
| +
|
| +    // FIXME: Support shrink for large objects.
|
| +    // Don't shrink backings allocated on other threads.
|
| +    BasePage* page = pageFromObject(address);
|
| +    if (page->isLargeObjectPage()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "backingShrink: large page\n");
|
| +#endif
|
| +        spinLockUnlock(&s_lock);
|
| +        return false;
|
| +    }
|
| +
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingShrink: begin\n");
|
| +#endif
|
| +    HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
|
| +    ASSERT(header->checkHeader());
|
| +    NormalPageHeap* heap = static_cast<NormalPage*>(page)->heapForNormalPage();
|
| +    // shrinkObject() reports whether it shrank at the allocation point;
| +    // either way the shrink itself is considered successful below.
|
| +    bool succeededAtAllocationPoint = heap->shrinkObject(header, quantizedShrunkSize);
|
| +    if (succeededAtAllocationPoint)
|
| +        s_state->allocationPointAdjusted(heap->heapIndex());
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "backingShrink: end\n");
|
| +#endif
|
| +
|
| +    spinLockUnlock(&s_lock);
|
| +    return true;
|
| +}
|
| +
|
| +// Vector-backing entry point: logs (in debug builds) and delegates to
| +// backingFree().
|
| +void BufferAllocator::freeVectorBacking(void* address)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "freeVectorBacking: address=%p\n", address);
|
| +#endif
|
| +    backingFree(address);
|
| +}
|
| +
|
| +// Vector-backing entry point: delegates to backingExpand().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::expandVectorBacking(void* address, size_t newSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "expandVectorBacking: address=%p, newSize=%ld\n", address, newSize);
|
| +#endif
|
| +    return backingExpand(address, newSize);
|
| +}
|
| +
|
| +// Vector-backing entry point: delegates to backingShrink().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "shrinkVectorBacking: address=%p, currentSize=%ld, shrunkSize=%ld\n", address, quantizedCurrentSize, quantizedShrunkSize);
|
| +#endif
|
| +    return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
|
| +}
|
| +
|
| +// Inline-vector-backing entry point: logs (in debug builds) and delegates
| +// to backingFree().
|
| +void BufferAllocator::freeInlineVectorBacking(void* address)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "freeInlineVectorBacking: address=%p\n", address);
|
| +#endif
|
| +    backingFree(address);
|
| +}
|
| +
|
| +// Inline-vector-backing entry point: delegates to backingExpand().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::expandInlineVectorBacking(void* address, size_t newSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "expandInlineVectorBacking: address=%p, newSize=%ld\n", address, newSize);
|
| +#endif
|
| +    return backingExpand(address, newSize);
|
| +}
|
| +
|
| +// Inline-vector-backing entry point: delegates to backingShrink().
|
| +bool BufferAllocator::shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    // Log both sizes, matching shrinkVectorBacking's trace output; %zu is
| +    // the portable printf conversion for size_t.
|
| +    fprintf(stderr, "shrinkInlineVectorBacking: address=%p, currentSize=%zu, shrunkSize=%zu\n", address, quantizedCurrentSize, quantizedShrunkSize);
|
| +#endif
|
| +    return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
|
| +}
|
| +
|
| +// Hash-table-backing entry point: logs (in debug builds) and delegates to
| +// backingFree().
|
| +void BufferAllocator::freeHashTableBacking(void* address)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "freeHashTableBacking: address=%p\n", address);
|
| +#endif
|
| +    backingFree(address);
|
| +}
|
| +
|
| +// Hash-table-backing entry point: delegates to backingExpand().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::expandHashTableBacking(void* address, size_t newSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "expandHashTableBacking: address=%p, newSize=%ld\n", address, newSize);
|
| +#endif
|
| +    return backingExpand(address, newSize);
|
| +}
|
| +
|
| +// Buffer-string-backing entry point: logs (in debug builds) and delegates
| +// to backingFree().
|
| +void BufferAllocator::freeBufferStringBacking(void* address)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "freeBufferStringBacking: address=%p\n", address);
|
| +#endif
|
| +    backingFree(address);
|
| +}
|
| +
|
| +// Buffer-string-backing entry point: delegates to backingExpand().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::expandBufferStringBacking(void* address, size_t newSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "expandBufferStringBacking: address=%p, newSize=%ld\n", address, newSize);
|
| +#endif
|
| +    return backingExpand(address, newSize);
|
| +}
|
| +
|
| +// Buffer-string-backing entry point: delegates to backingShrink().
| +// FIXME: %ld is the wrong conversion for size_t; use %zu.
|
| +bool BufferAllocator::shrinkBufferStringBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +    fprintf(stderr, "shrinkBufferStringBacking: address=%p, currentSize=%ld, shrunkSize=%ld\n", address, quantizedCurrentSize, quantizedShrunkSize);
|
| +#endif
|
| +    return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
|
| +}
|
| +
|
| +} // namespace blink
|
|
|