| Index: Source/platform/heap/BufferAllocator.h
|
| diff --git a/Source/platform/heap/BufferAllocator.h b/Source/platform/heap/BufferAllocator.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..a390413bb6c5df2ccc49aa3e0223657031d93392
|
| --- /dev/null
|
| +++ b/Source/platform/heap/BufferAllocator.h
|
| @@ -0,0 +1,186 @@
|
| +// Copyright 2015 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#ifndef BufferAllocator_h
|
| +#define BufferAllocator_h
|
| +
|
| +#include "platform/heap/GCInfo.h"
|
| +#include "platform/heap/Heap.h"
|
| +#include "wtf/Assertions.h"
|
| +
|
| +namespace blink {
|
| +
|
| +// BufferAllocator funnels the backing stores of Blink collections
|
| +// (Vector, inline Vector, HashTable, and string buffers) into dedicated
|
| +// NormalPageHeaps owned by a single shared ThreadState. Each allocation
|
| +// is tagged with a small gcInfoIndex that encodes the kind of backing
|
| +// store and whether the holding collection appears to live on the stack
|
| +// (see isOnStack()).
|
| +//
|
| +// Thread-safety: every allocate*() entry point serializes on the global
|
| +// spin lock s_lock; the free/expand/shrink declarations are defined
|
| +// elsewhere.
|
| +class PLATFORM_EXPORT BufferAllocator final {
|
| +public:
|
| +    // Spin lock guarding s_state; held across every allocation below.
|
| +    static int s_lock;
|
| +    // The ThreadState whose heaps service all BufferAllocator requests.
|
| +    static ThreadState* s_state;
|
| +    // When true, hit-rate statistics are suppressed
|
| +    // (see setIgnoreHitRateCount()).
|
| +    static bool s_ignoreHitRateCount;
|
| +    // Mask for extracting a gcInfoIndex; assumes GCInfoTable::maxIndex is
|
| +    // a power of two -- TODO confirm against GCInfoTable.
|
| +    static const int gcInfoIndexMask = GCInfoTable::maxIndex - 1;
|
| +
|
| +    static void initialize();
|
| +    static void shutdown();
|
| +
|
| +    // Heuristic stack test: |holder| is treated as stack-resident when it
|
| +    // lies within 128KB above the address of a local variable. Unsigned
|
| +    // wraparound makes addresses below the local compare huge, so they
|
| +    // are rejected by the single comparison.
|
| +    static bool isOnStack(void* holder)
|
| +    {
|
| +        int stackPtr;
|
| +        return reinterpret_cast<uintptr_t>(holder) - reinterpret_cast<uintptr_t>(&stackPtr) < 128 * 1024;
|
| +    }
|
| +
|
| +    // Returns the usable payload for |count| elements of |elementSize|
|
| +    // bytes after heap-granule rounding, excluding the object header.
|
| +    // |elementSize| must be non-zero: the overflow guard divides by it.
|
| +    static size_t quantizedSize(size_t count, size_t elementSize)
|
| +    {
|
| +        RELEASE_ASSERT(count <= maxHeapObjectSize / elementSize);
|
| +        return Heap::allocationSizeFromSize(count * elementSize) - sizeof(HeapObjectHeader);
|
| +    }
|
| +
|
| +    // Allocates |size| bytes of Vector backing. gcInfoIndex: 1 = holder
|
| +    // on stack, 2 = holder on heap.
|
| +    static void* allocateVectorBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = isOnStack(holder) ? 1 : 2;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->vectorBackingHeap(gcInfoIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        // %zu is the correct printf conversion for size_t; %ld is
|
| +        // undefined where size_t != long (e.g. 64-bit Windows).
|
| +        fprintf(stderr, "allocateVectorBacking: gcInfoIndex=%zu, size=%zu\n", gcInfoIndex, size);
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateVectorBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    // Same as allocateVectorBacking() but served from the expanded
|
| +    // vector-backing heap.
|
| +    static void* allocateExpandedVectorBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = isOnStack(holder) ? 1 : 2;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->expandedVectorBackingHeap(gcInfoIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateExpandedVectorBacking: gcInfoIndex=%zu, size=%zu\n", gcInfoIndex, size);
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateExpandedVectorBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    static void freeVectorBacking(void*);
|
| +    static bool expandVectorBacking(void*, size_t);
|
| +    static bool shrinkVectorBacking(void*, size_t quantizedCurrentSize, size_t quantizedShrunkSize);
|
| +
|
| +    // Allocates backing for an inline Vector. gcInfoIndex: 3 = holder on
|
| +    // stack, 4 = holder on heap; note the heap itself is selected by
|
| +    // InlineVectorHeapIndex, not by gcInfoIndex.
|
| +    static void* allocateInlineVectorBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = isOnStack(holder) ? 3 : 4;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->heap(ThreadState::InlineVectorHeapIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateInlineVectorBacking: gcInfoIndex=%zu, size=%zu\n", gcInfoIndex, size);
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateInlineVectorBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    static void freeInlineVectorBacking(void*);
|
| +    static bool expandInlineVectorBacking(void*, size_t);
|
| +    static bool shrinkInlineVectorBacking(void*, size_t quantizedCurrentSize, size_t quantizedShrunkSize);
|
| +
|
| +    // Allocates backing for a HashTable. gcInfoIndex: 6 = no holder,
|
| +    // 7 = holder on stack, 5 = holder on heap.
|
| +    static void* allocateHashTableBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = 5;
|
| +        if (!holder)
|
| +            gcInfoIndex = 6;
|
| +        else if (isOnStack(holder))
|
| +            gcInfoIndex = 7;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->hashTableBackingHeap(gcInfoIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateHashTableBacking: gcInfoIndex=%zu, size=%zu\n", gcInfoIndex, size);
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateHashTableBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    // As allocateHashTableBacking(), but zero-fills the returned buffer.
|
| +    static void* allocateZeroedHashTableBacking(size_t size, void* holder)
|
| +    {
|
| +        void* result = allocateHashTableBacking(size, holder);
|
| +        memset(result, 0, size);
|
| +        return result;
|
| +    }
|
| +    // Same as allocateHashTableBacking() but served from the expanded
|
| +    // hash-table-backing heap; identical gcInfoIndex selection (5/6/7).
|
| +    static void* allocateExpandedHashTableBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = 5;
|
| +        if (!holder)
|
| +            gcInfoIndex = 6;
|
| +        else if (isOnStack(holder))
|
| +            gcInfoIndex = 7;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->expandedHashTableBackingHeap(gcInfoIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateExpandedHashTableBacking: gcInfoIndex=%zu, size=%zu\n", gcInfoIndex, size);
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateExpandedHashTableBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    // As allocateExpandedHashTableBacking(), but zero-fills the buffer.
|
| +    static void* allocateZeroedExpandedHashTableBacking(size_t size, void* holder)
|
| +    {
|
| +        void* result = allocateExpandedHashTableBacking(size, holder);
|
| +        memset(result, 0, size);
|
| +        return result;
|
| +    }
|
| +    static void freeHashTableBacking(void*);
|
| +    static bool expandHashTableBacking(void*, size_t);
|
| +
|
| +    // Allocates string-buffer backing. gcInfoIndex: 8 = holder on stack,
|
| +    // 9 = holder on heap; heap selected by BufferStringHeapIndex.
|
| +    static void* allocateBufferStringBacking(size_t size, void* holder)
|
| +    {
|
| +        spinLockLock(&s_lock);
|
| +
|
| +        size_t gcInfoIndex = isOnStack(holder) ? 8 : 9;
|
| +        NormalPageHeap* heap = static_cast<NormalPageHeap*>(s_state->heap(ThreadState::BufferStringHeapIndex));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateBufferStringBacking: gcInfoIndex=%zu, size=%zu, isOnStack=%d\n", gcInfoIndex, size, isOnStack(holder));
|
| +#endif
|
| +        void* address = heap->allocateObject(Heap::allocationSizeFromSize(size), gcInfoIndex);
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| +        fprintf(stderr, "allocateBufferStringBacking: address=%p\n", address);
|
| +#endif
|
| +
|
| +        spinLockUnlock(&s_lock);
|
| +        return address;
|
| +    }
|
| +    static void freeBufferStringBacking(void*);
|
| +    static bool expandBufferStringBacking(void*, size_t);
|
| +    static bool shrinkBufferStringBacking(void*, size_t quantizedCurrentSize, size_t quantizedShrunkSize);
|
| +
|
| +    // Accessors for the hit-rate-count suppression flag.
|
| +    static bool ignoreHitRateCount() { return s_ignoreHitRateCount; }
|
| +    static void setIgnoreHitRateCount(bool ignore) { s_ignoreHitRateCount = ignore; }
|
| +
|
| +    static void printStats();
|
| +
|
| +private:
|
| +    // Shared implementations behind the per-kind free/expand/shrink
|
| +    // entry points above; defined out of line.
|
| +    static void backingFree(void*);
|
| +    static bool backingExpand(void*, size_t);
|
| +    static bool backingShrink(void*, size_t quantizedCurrentSize, size_t quantizedShrunkSize);
|
| +};
|
| +
|
| +} // namespace blink
|
| +
|
| +#endif
|
|
|