Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index ff5a704e49aab49b030295a7a3584b6ec5caa9ba..97f16e49135af03dcbf2e4bbddae0c35ec831917 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -6,11 +6,13 @@
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
+#include "src/v8.h"
 namespace v8 {
 namespace internal {
@@ -303,7 +305,8 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)),
+      unmapper_(this) {}
 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
                             intptr_t code_range_size) {
@@ -322,10 +325,14 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
 void MemoryAllocator::TearDown() {
-  for (MemoryChunk* chunk : chunk_pool_) {
+  unmapper()->WaitUntilCompleted();
+
+  MemoryChunk* chunk = nullptr;
+  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
+
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
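
The reworked TearDown() now has to respect the background unmapper: it first waits for any in-flight unmapping tasks and only then drains the pooled chunks and hands their memory back. A minimal standalone sketch of that shutdown ordering, using std::thread and a plain vector in place of V8's task and pool types (all names below are illustrative, not V8 API):

#include <cstdlib>
#include <thread>
#include <vector>

struct AllocatorTeardownSketch {
  std::vector<void*> pooled_chunks;  // stand-in for the unmapper's pooled list
  std::thread unmapper_worker;       // stand-in for the background unmap task

  void TearDown() {
    // 1. Wait until the concurrent unmapper has finished its queued work.
    if (unmapper_worker.joinable()) unmapper_worker.join();
    // 2. Only then is it safe to drain the pool and release the raw memory.
    for (void* chunk : pooled_chunks) std::free(chunk);
    pooled_chunks.clear();
  }
};
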
@@ -341,6 +348,55 @@ void MemoryAllocator::TearDown() {
   code_range_ = nullptr;
 }
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Unmapper* unmapper_;
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
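
The task body signals pending_unmapping_tasks_semaphore_ only after PerformFreeMemoryOnQueuedChunks() returns, so a waiter that gets past the semaphore knows that round of unmapping is complete. The same shape in standard C++ (a sketch assuming C++20 std::counting_semaphore; UnmapTaskBody is a made-up name, not V8 code):

#include <semaphore>
#include <thread>

std::counting_semaphore<> pending_unmapping_tasks(0);

void UnmapTaskBody() {
  // ... process every chunk currently sitting in the queue ...
  pending_unmapping_tasks.release();  // signalled last: a waiter that gets
                                      // past acquire() knows the work is done
}

int main() {
  std::thread(UnmapTaskBody).detach();
  pending_unmapping_tasks.acquire();  // one acquire per completed task
}
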
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+    concurrent_unmapping_tasks_active_++;
+  } else {
+    PerformFreeMemoryOnQueuedChunks();
+  }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+  bool waited = false;
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+    waited = true;
+  }
+  return waited;
+}
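
FreeQueuedChunks() posts one background task per call and counts it in concurrent_unmapping_tasks_active_, falling back to synchronous processing when FLAG_concurrent_sweeping is off; WaitUntilCompleted() then performs exactly that many semaphore waits. A rough portable rendering of the handshake (a sketch under the assumption of C++20 semaphores and std::thread as the task runner; none of this is the actual V8 platform API):

#include <semaphore>
#include <thread>

class UnmapperSketch {
 public:
  void FreeQueuedChunks(bool concurrent) {
    if (concurrent) {
      std::thread([this] {
        PerformFreeMemoryOnQueuedChunks();
        done_.release();                  // one signal per posted task
      }).detach();
      tasks_active_++;                    // only touched on the caller's thread
    } else {
      PerformFreeMemoryOnQueuedChunks();  // synchronous fallback
    }
  }

  bool WaitUntilCompleted() {
    bool waited = false;
    while (tasks_active_ > 0) {
      done_.acquire();                    // consume exactly one completion
      tasks_active_--;
      waited = true;
    }
    return waited;
  }

 private:
  void PerformFreeMemoryOnQueuedChunks() { /* drain the queued chunks */ }

  std::counting_semaphore<> done_{0};
  int tasks_active_ = 0;
};
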
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+  MemoryChunk* chunk = nullptr;
+  // Regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+    allocator_->PerformFreeMemory(chunk);
+    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+  }
+  // Non-regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+    allocator_->PerformFreeMemory(chunk);
+  }
+}
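
AddMemoryChunkSafe<>/GetMemoryChunkSafe<> are declared in spaces.h and are not part of this hunk; from their use here they act as mutex-guarded per-category lists (kRegular, kNonRegular, kPooled), with regular chunks that carry the POOLED flag being recycled onto the pooled list once their memory has been processed. A purely illustrative sketch of such a structure, not the actual header:

#include <list>
#include <mutex>

enum ChunkQueueType { kRegular, kNonRegular, kPooled, kNumberOfChunkQueues };

class ChunkQueuesSketch {
 public:
  template <ChunkQueueType type>
  void AddSafe(void* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    chunks_[type].push_back(chunk);
  }

  template <ChunkQueueType type>
  void* GetSafe() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (chunks_[type].empty()) return nullptr;
    void* chunk = chunks_[type].front();
    chunks_[type].pop_front();
    return chunk;
  }

 private:
  std::mutex mutex_;                             // one lock protects all lists
  std::list<void*> chunks_[kNumberOfChunkQueues];
};
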
+
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
@@ -748,28 +804,45 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   chunk->ReleaseAllocatedMemory();
   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
-    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    if (reservation->IsReserved()) {
+      FreeMemory(reservation, chunk->executable());
+    } else {
+      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    }
   }
 }
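
Pooled chunks keep their reserved address range and only have their pages decommitted, so AllocatePagePooled() can later reuse them cheaply; every other chunk is released outright. On Linux the distinction looks roughly like this (an illustrative sketch of uncommit-versus-release, not V8's UncommitBlock/FreeMemory implementation):

#include <cstddef>
#include <sys/mman.h>

// Keep the reservation but drop the physical pages: the range stays owned by
// the process and can be recommitted when the chunk is handed out again.
bool UncommitRange(void* start, size_t size) {
  return mprotect(start, size, PROT_NONE) == 0 &&
         madvise(start, size, MADV_DONTNEED) == 0;
}

// Give the whole reservation back to the OS: the chunk cannot be reused.
bool ReleaseRange(void* start, size_t size) {
  return munmap(start, size) == 0;
}
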
-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  if (mode == kRegular) {
-    PreFreeMemory(chunk);
-    PerformFreeMemory(chunk);
-  } else {
-    DCHECK_EQ(mode, kPooled);
-    FreePooled(chunk);
+  switch (mode) {
+    case kFull:
+      PreFreeMemory(chunk);
+      PerformFreeMemory(chunk);
+      break;
+    case kPooledAndQueue:
+      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+      chunk->SetFlag(MemoryChunk::POOLED);
+    // Fall through to kPreFreeAndQueue.
+    case kPreFreeAndQueue:
+      PreFreeMemory(chunk);
+      // The chunks added to this queue will be freed by a concurrent thread.
+      unmapper()->AddMemoryChunkSafe(chunk);
+      break;
+    default:
+      UNREACHABLE();
   }
 }
-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);
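
The old two-way AllocationMode split becomes a three-way FreeMode, and kPooledAndQueue intentionally falls through to kPreFreeAndQueue so both queued variants end up on the unmapper. The dispatch-on-enum-template plus explicit-instantiation pattern, reduced to a standalone sketch (names and the puts() bodies are placeholders):

#include <cstdio>

enum FreeMode { kFull, kPooledAndQueue, kPreFreeAndQueue };

struct Chunk { bool pooled = false; };

template <FreeMode mode>
void Free(Chunk* chunk) {
  switch (mode) {
    case kFull:
      std::puts("pre-free + free immediately");
      break;
    case kPooledAndQueue:
      chunk->pooled = true;  // remember that the chunk should be recycled
      // Fall through: queued chunks are processed by the background thread.
    case kPreFreeAndQueue:
      std::puts("pre-free + hand off to the unmapper queue");
      break;
  }
}

// Explicit instantiations keep the definition out of the header, as in the patch.
template void Free<kFull>(Chunk*);
template void Free<kPooledAndQueue>(Chunk*);
template void Free<kPreFreeAndQueue>(Chunk*);
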
 template <typename PageType, MemoryAllocator::AllocationMode mode,
@@ -803,9 +876,9 @@ template NewSpacePage* MemoryAllocator::AllocatePage<
 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
-  if (chunk_pool_.is_empty()) return nullptr;
+  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+  if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
-  MemoryChunk* chunk = chunk_pool_.RemoveLast();
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
@@ -817,18 +890,6 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   return chunk;
 }
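
AllocatePagePooled() now pulls recycled chunks from the unmapper instead of a private chunk_pool_, recommits the fixed-size page and reinitializes the chunk header in place. The recycling idea in miniature (a sketch; Recommit/Initialize merely stand in for the real CommitBlock/MemoryChunk::Initialize steps, and the page size is made up):

#include <cstddef>
#include <vector>

constexpr size_t kPageSize = 512 * 1024;  // illustrative, not V8's value

struct PagePoolSketch {
  std::vector<void*> pooled;

  // Returns a recycled page, or nullptr if the caller must map a fresh one.
  void* Allocate() {
    if (pooled.empty()) return nullptr;  // caller falls back to a new mapping
    void* chunk = pooled.back();
    pooled.pop_back();
    Recommit(chunk, kPageSize);          // bring the decommitted pages back
    Initialize(chunk, kPageSize);        // rebuild the chunk header in place
    return chunk;
  }

  static void Recommit(void*, size_t) {}    // placeholder
  static void Initialize(void*, size_t) {}  // placeholder
};
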
-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
-  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
-  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
-  chunk_pool_.Add(chunk);
-  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
-  if (chunk->executable() == EXECUTABLE) {
-    size_executable_.Increment(-chunk_size);
-  }
-  size_.Increment(-chunk_size);
-  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
@@ -971,12 +1032,16 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
 // MemoryChunk implementation
 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete skip_list_;
-  skip_list_ = nullptr;
-  delete mutex_;
-  mutex_ = nullptr;
-  ReleaseOldToNewSlots();
-  ReleaseOldToOldSlots();
+  if (skip_list_ != nullptr) {
+    delete skip_list_;
+    skip_list_ = nullptr;
+  }
+  if (mutex_ != nullptr) {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
 }
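
Because pooled pages can now reach ReleaseAllocatedMemory() without ever having had slot sets, a mutex, or a skip list allocated, each release is guarded and the pointer cleared, keeping the call safe on partially initialized chunks and safe to repeat. The same defensive pattern in isolation (a sketch with placeholder members):

struct ChunkSketch {
  int* skip_list = nullptr;  // placeholder for the real SkipList
  int* slots = nullptr;      // placeholder for the real SlotSet

  // Release each resource only if it was ever set up, and null the pointer so
  // a second call (or a call on a half-initialized chunk) is harmless.
  void ReleaseAllocatedMemory() {
    if (skip_list != nullptr) {
      delete skip_list;
      skip_list = nullptr;
    }
    if (slots != nullptr) {
      delete slots;
      slots = nullptr;
    }
  }
};
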
 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -1060,7 +1125,7 @@ bool PagedSpace::HasBeenSetUp() { return true; }
 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
@@ -1251,7 +1316,7 @@ void PagedSpace::ReleasePage(Page* page) {
   }
   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  heap()->QueueMemoryChunkForFree(page);
+  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
@@ -1715,12 +1780,14 @@ bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(it.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+        it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   return true;
 }
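
SemiSpace::Uncommit() also shows the producer-side pattern: queue every page with kPooledAndQueue first, then issue a single FreeQueuedChunks() so one background task drains the whole batch. A hypothetical caller written against stand-in types (not the real heap interface):

#include <vector>

struct Chunk;
struct UnmapperStub {
  void AddMemoryChunkSafe(Chunk*) { /* enqueue under the queue lock */ }
  void FreeQueuedChunks() { /* post one background task for the batch */ }
};

// Queue all pages cheaply, then kick off exactly one round of unmapping.
void UncommitAll(UnmapperStub* unmapper, const std::vector<Chunk*>& pages) {
  for (Chunk* page : pages) unmapper->AddMemoryChunkSafe(page);
  unmapper->FreeQueuedChunks();
}
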
@@ -1798,10 +1865,12 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(last_page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+          last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
+    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
   return true;
@@ -2894,7 +2963,7 @@ void LargeObjectSpace::TearDown() {
     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
     heap()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, page->size());
-    heap()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }
@@ -3037,7 +3106,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
                           static_cast<uint32_t>(key));
       }
-      heap()->QueueMemoryChunkForFree(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
} |