Chromium Code Reviews

Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 791ee6c32ac410b63ff4de43ab2d27eec113a4dd..84ac160e27d1276d3b21f775ec421a1a6d1bcc0c 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -5,6 +5,8 @@
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
+#include <list>
+
 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
@@ -441,6 +443,10 @@ class MemoryChunk {
   // still has to be performed.
   PRE_FREED,
+  // |POOLED|: When actually freeing this chunk, only uncommit and do not
+  // give up the reservation as we still reuse the chunk at some point.
+  POOLED,
+
   // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
   // has been aborted and needs special handling by the sweeper.
   COMPACTION_WAS_ABORTED,
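The flag's comment distinguishes uncommitting (returning backing pages to the OS while keeping the virtual address range reserved) from giving up the reservation itself. A minimal Linux-flavored illustration of that distinction, independent of V8's base::VirtualMemory wrapper; everything here is illustrative, not code from this CL:

    #include <sys/mman.h>
    #include <unistd.h>

    // Illustration only: the reserve/commit/uncommit/release lifecycle that
    // the POOLED flag is about, expressed with raw POSIX calls.
    void PooledLifecycleSketch() {
      const size_t size = 16 * sysconf(_SC_PAGESIZE);
      // Reserve an address range without committing backing pages.
      void* base = mmap(nullptr, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return;
      // Commit: make the pages usable.
      mprotect(base, size, PROT_READ | PROT_WRITE);
      // Uncommit (what POOLED asks for): drop the backing pages but keep the
      // reservation, so reuse is a cheap re-commit at the same address.
      madvise(base, size, MADV_DONTNEED);
      mprotect(base, size, PROT_NONE);
      // Full free (what POOLED avoids): give up the reservation entirely.
      munmap(base, size);
    }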
@@ -1256,15 +1262,93 @@ class SkipList {
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and
 // large pages for large object space.
-//
-// Each space has to manage it's own pages.
-//
 class MemoryAllocator {
  public:
+  // Unmapper takes care of concurrently unmapping and uncommitting memory
+  // chunks.
+  class Unmapper {
+   public:
+    class UnmapFreeMemoryTask;
+
+    explicit Unmapper(MemoryAllocator* allocator)
+        : allocator_(allocator),
+          pending_unmapping_tasks_semaphore_(0),
+          concurrent_unmapping_tasks_active_(0) {}
+
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      if ((chunk->size() == Page::kPageSize) &&
+          (chunk->executable() != EXECUTABLE)) {
+        AddMemoryChunkSafe<kRegularProcess>(chunk);
+      } else {
+        AddMemoryChunkSafe<kNonRegularProcess>(chunk);
+      }
+    }
+
+    MemoryChunk* TryGetPooledMemoryChunkSafe() {
+      // Procedure:
+      // (1) Try to get a chunk that was declared as pooled and already has
+      //     been uncommitted.
+      // (2) Try to steal any memory chunk of kPageSize that would've been
+      //     unmapped.
+      MemoryChunk* chunk = GetMemoryChunkSafe<kPooledOutput>();
+      if (chunk == nullptr) {
+        chunk = GetMemoryChunkSafe<kRegularProcess>();
+        if (chunk != nullptr) {
+          // For stolen chunks we need to manually free any allocated memory.
+          chunk->ReleaseAllocatedMemory();
+        }
+      }
+      return chunk;
+    }
+
+    void FreeQueuedChunks();
+    bool WaitUntilCompleted();
+
+   private:
+    enum ChunkQueueType {
+      kRegularProcess,    // Pages of kPageSize that do not live in a CodeRange
        Hannes Payer (out of office), 2016/04/26 13:01:27: kRegular
        Michael Lippautz, 2016/04/26 13:57:51: Done.
+                          // and can thus be used for stealing.
+      kNonRegularProcess, // Large chunks and executable chunks.
        Hannes Payer (out of office), 2016/04/26 13:01:27: kNonRegular
        Michael Lippautz, 2016/04/26 13:57:51: Done.
+      kPooledOutput,      // Pooled chunks, already uncommitted and ready for
        Hannes Payer (out of office), 2016/04/26 13:01:27: kPooled
        Michael Lippautz, 2016/04/26 13:57:51: Done.
+                          // reuse.
+      kNumberOfChunkQueues,
+    };
+
+    template <ChunkQueueType type>
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      chunks_[type].push_back(chunk);
+    }
+
+    template <ChunkQueueType type>
+    MemoryChunk* GetMemoryChunkSafe() {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      if (chunks_[type].empty()) return nullptr;
+      MemoryChunk* chunk = chunks_[type].front();
+      chunks_[type].pop_front();
+      return chunk;
+    }
+
+    void PerformFreeMemoryOnQueuedChunks();
+
+    base::Mutex mutex_;
+    MemoryAllocator* allocator_;
+    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    base::Semaphore pending_unmapping_tasks_semaphore_;
+    intptr_t concurrent_unmapping_tasks_active_;
+
+    friend class MemoryAllocator;
+  };
+
   enum AllocationMode {
     kRegular,
     kPooled,
   };
+  enum FreeMode {
+    kFull,
+    kPreFreeAndQueue,
+    kPooledAndQueue,
+  };

   explicit MemoryAllocator(Isolate* isolate);
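FreeQueuedChunks(), WaitUntilCompleted(), and PerformFreeMemoryOnQueuedChunks() are only declared in this header; their bodies land in spaces.cc and are not part of this diff. A hedged reconstruction of how the pieces plausibly fit together, using only names visible above (the task-posting call is reduced to a comment because its API is not shown here, and the MemoryChunk::IsFlagSet/POOLED interplay is an assumption):

    // Sketch, not the actual spaces.cc definitions.
    void MemoryAllocator::Unmapper::FreeQueuedChunks() {
      // Post an UnmapFreeMemoryTask to a background thread; the task runs
      // PerformFreeMemoryOnQueuedChunks() and signals
      // pending_unmapping_tasks_semaphore_ when it finishes.
      concurrent_unmapping_tasks_active_++;
    }

    bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
      bool waited = false;
      while (concurrent_unmapping_tasks_active_ > 0) {
        pending_unmapping_tasks_semaphore_.Wait();
        concurrent_unmapping_tasks_active_--;
        waited = true;
      }
      return waited;
    }

    void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
      MemoryChunk* chunk = nullptr;
      // Regular chunks: free them, but republish POOLED ones for reuse.
      while ((chunk = GetMemoryChunkSafe<kRegularProcess>()) != nullptr) {
        bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
        allocator_->PerformFreeMemory(chunk);
        if (pooled) AddMemoryChunkSafe<kPooledOutput>(chunk);
      }
      // Non-regular chunks (large or executable): always freed in full.
      while ((chunk = GetMemoryChunkSafe<kNonRegularProcess>()) != nullptr) {
        allocator_->PerformFreeMemory(chunk);
      }
    }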
@@ -1283,16 +1367,7 @@ class MemoryAllocator {
   PageType* AllocatePage(intptr_t size, SpaceType* owner,
                          Executability executable);

-  // PreFree logically frees the object, i.e., it takes care of the size
-  // bookkeeping and calls the allocation callback.
-  void PreFreeMemory(MemoryChunk* chunk);
-
-  // FreeMemory can be called concurrently when PreFree was executed before.
-  void PerformFreeMemory(MemoryChunk* chunk);
-
-  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
-  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
-  template <MemoryAllocator::AllocationMode mode = kRegular>
+  template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);

   // Returns allocated spaces in bytes.
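Free() collapses the previous wrapper methods into a single templated entry point selected by FreeMode. Its definition is in spaces.cc, not in this hunk; a plausible reconstruction from the declarations in this header (the fall-through and the SetFlag call are assumptions):

    // Sketch only; the actual definition lives in spaces.cc.
    template <MemoryAllocator::FreeMode mode>
    void MemoryAllocator::Free(MemoryChunk* chunk) {
      switch (mode) {
        case kFull:
          // Synchronous path: bookkeeping plus immediate unmapping.
          PreFreeMemory(chunk);
          PerformFreeMemory(chunk);
          break;
        case kPooledAndQueue:
          // Keep the reservation: the chunk is only uncommitted later and
          // then republished for reuse.
          chunk->SetFlag(MemoryChunk::POOLED);
          // Fall through: pooled chunks are pre-freed and queued as well.
        case kPreFreeAndQueue:
          PreFreeMemory(chunk);
          // Actual freeing happens concurrently on the Unmapper's queues.
          unmapper()->AddMemoryChunkSafe(chunk);
          break;
      }
    }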
@@ -1398,16 +1473,21 @@ class MemoryAllocator {
                                   size_t reserved_size);

   CodeRange* code_range() { return code_range_; }
+  Unmapper* unmapper() { return &unmapper_; }

  private:
+  // PreFree logically frees the object, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // FreeMemory can be called concurrently when PreFree was executed before.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
   // See AllocatePage for public interface. Note that currently we only support
   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);

-  // Free that chunk into the pool.
-  void FreePooled(MemoryChunk* chunk);
-
   Isolate* isolate_;

   CodeRange* code_range_;
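AllocatePagePooled() is likewise declared here but defined in spaces.cc. A plausible shape, under the assumption that a pool miss is reported to the caller as nullptr so it can fall back to a fresh allocation:

    // Sketch only; re-commit and reinitialization for |owner| are elided.
    template <typename SpaceType>
    MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
      MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
      if (chunk == nullptr) return nullptr;  // Pool empty: caller maps fresh memory.
      // Re-commit the chunk's reserved memory and reinitialize it for |owner|.
      return chunk;
    }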
@@ -1463,9 +1543,8 @@ class MemoryAllocator {
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }

-  List<MemoryChunk*> chunk_pool_;
-
   base::VirtualMemory last_chunk_;
+  Unmapper unmapper_;

   friend class TestCodeRangeScope;
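Taken together, a hypothetical call site shows the intended lifecycle; heap->memory_allocator() and the exact sequencing are assumptions for illustration, not code from this CL:

    // E.g. at the end of a GC pause:
    MemoryAllocator* allocator = heap->memory_allocator();
    allocator->Free<MemoryAllocator::kPreFreeAndQueue>(page);  // Bookkeeping now, unmap later.
    allocator->unmapper()->FreeQueuedChunks();    // Kick off background unmapping.
    // ... the mutator runs while chunks are unmapped concurrently ...
    allocator->unmapper()->WaitUntilCompleted();  // Join, e.g. before teardown.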