Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(524)

Side by Side Diff: src/heap/heap.cc

Issue 1303263002: Concurrently unmap free pages. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/once.h" 10 #include "src/base/once.h"
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
128 old_generation_size_at_last_gc_(0), 128 old_generation_size_at_last_gc_(0),
129 gcs_since_last_deopt_(0), 129 gcs_since_last_deopt_(0),
130 allocation_sites_scratchpad_length_(0), 130 allocation_sites_scratchpad_length_(0),
131 ring_buffer_full_(false), 131 ring_buffer_full_(false),
132 ring_buffer_end_(0), 132 ring_buffer_end_(0),
133 promotion_queue_(this), 133 promotion_queue_(this),
134 configured_(false), 134 configured_(false),
135 current_gc_flags_(Heap::kNoGCFlags), 135 current_gc_flags_(Heap::kNoGCFlags),
136 external_string_table_(this), 136 external_string_table_(this),
137 chunks_queued_for_free_(NULL), 137 chunks_queued_for_free_(NULL),
138 pending_unmap_job_semaphore_(0),
138 gc_callbacks_depth_(0), 139 gc_callbacks_depth_(0),
139 deserialization_complete_(false), 140 deserialization_complete_(false),
140 concurrent_sweeping_enabled_(false), 141 concurrent_sweeping_enabled_(false),
141 strong_roots_list_(NULL) { 142 strong_roots_list_(NULL) {
142 // Allow build-time customization of the max semispace size. Building 143 // Allow build-time customization of the max semispace size. Building
143 // V8 with snapshots and a non-default max semispace size is much 144 // V8 with snapshots and a non-default max semispace size is much
144 // easier if you can define it as part of the build environment. 145 // easier if you can define it as part of the build environment.
145 #if defined(V8_MAX_SEMISPACE_SIZE) 146 #if defined(V8_MAX_SEMISPACE_SIZE)
146 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; 147 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
147 #endif 148 #endif
(...skipping 6499 matching lines...) Expand 10 before | Expand all | Expand 10 after
6647 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i])); 6648 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6648 } 6649 }
6649 new_space_strings_.Free(); 6650 new_space_strings_.Free();
6650 for (int i = 0; i < old_space_strings_.length(); ++i) { 6651 for (int i = 0; i < old_space_strings_.length(); ++i) {
6651 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i])); 6652 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6652 } 6653 }
6653 old_space_strings_.Free(); 6654 old_space_strings_.Free();
6654 } 6655 }
6655 6656
6656 6657
// Background task that unmaps a list of memory chunks off the main thread.
// The task takes a detached list head so it never touches
// Heap::chunks_queued_for_free_ concurrently with the main thread.
class Heap::UnmapFreeMemoryTask : public v8::Task {
 public:
  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
      : heap_(heap), head_(head) {}
  virtual ~UnmapFreeMemoryTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    // Free every chunk in the singly-linked list starting at |head_|.
    heap_->FreeQueuedChunks(head_);
    // Wake up Heap::WaitUntilUnmappingOfFreeChunksCompleted().
    heap_->pending_unmap_job_semaphore_.Signal();
  }

  Heap* heap_;         // Not owned.
  MemoryChunk* head_;  // Head of the chunk list to unmap.

  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
6677
6678
// Blocks until both outstanding unmap jobs have signalled completion.
void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
  // We start an unmap job after sweeping and after compaction.
  // Each FreeQueuedChunks() call signals the semaphore exactly once —
  // either from the background UnmapFreeMemoryTask, or synchronously when
  // the queue was empty — so two waits pair with the two job starts.
  // NOTE(review): this hard-codes that FreeQueuedChunks() is called exactly
  // twice per GC cycle before this wait; the call sites are outside this
  // view — confirm.
  pending_unmap_job_semaphore_.Wait();
  pending_unmap_job_semaphore_.Wait();
}
6684
6685
// Prepends |chunk| to the singly-linked list of chunks that the next
// FreeQueuedChunks() call will hand off for unmapping.
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}
6661 6690
6662 6691
6663 void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() { 6692 void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
6664 if (chunks_queued_for_free_ == NULL) return; 6693 if (chunks_queued_for_free_ == NULL) return;
6665 MemoryChunk* next; 6694 MemoryChunk* next;
6666 MemoryChunk* chunk; 6695 MemoryChunk* chunk;
6667 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { 6696 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6668 next = chunk->next_chunk(); 6697 next = chunk->next_chunk();
6669 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); 6698 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6670 } 6699 }
6671 isolate_->heap()->store_buffer()->Compact(); 6700 store_buffer()->Compact();
6672 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); 6701 store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6673 } 6702 }
6674 6703
6675 6704
6676 void Heap::FreeQueuedChunks() { 6705 void Heap::FreeQueuedChunks() {
6706 if (chunks_queued_for_free_ != NULL) {
6707 V8::GetCurrentPlatform()->CallOnBackgroundThread(
6708 new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
6709 v8::Platform::kShortRunningTask);
6710 chunks_queued_for_free_ = NULL;
6711 } else {
6712 // If we do not have anything to unmap, we just signal the semaphore
6713 // that we are done.
6714 pending_unmap_job_semaphore_.Signal();
6715 }
6716 }
6717
6718
6719 void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
6677 MemoryChunk* next; 6720 MemoryChunk* next;
6678 MemoryChunk* chunk; 6721 MemoryChunk* chunk;
6679 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { 6722 for (chunk = list_head; chunk != NULL; chunk = next) {
6680 next = chunk->next_chunk(); 6723 next = chunk->next_chunk();
6681 isolate_->memory_allocator()->Free(chunk); 6724 isolate_->memory_allocator()->Free(chunk);
6682 } 6725 }
6683 chunks_queued_for_free_ = NULL;
6684 } 6726 }
6685 6727
6686 6728
6687 void Heap::RememberUnmappedPage(Address page, bool compacted) { 6729 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6688 uintptr_t p = reinterpret_cast<uintptr_t>(page); 6730 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6689 // Tag the page pointer to make it findable in the dump file. 6731 // Tag the page pointer to make it findable in the dump file.
6690 if (compacted) { 6732 if (compacted) {
6691 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared. 6733 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6692 } else { 6734 } else {
6693 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. 6735 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
6877 *object_type = "CODE_TYPE"; \ 6919 *object_type = "CODE_TYPE"; \
6878 *object_sub_type = "CODE_AGE/" #name; \ 6920 *object_sub_type = "CODE_AGE/" #name; \
6879 return true; 6921 return true;
6880 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) 6922 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6881 #undef COMPARE_AND_RETURN_NAME 6923 #undef COMPARE_AND_RETURN_NAME
6882 } 6924 }
6883 return false; 6925 return false;
6884 } 6926 }
6885 } // namespace internal 6927 } // namespace internal
6886 } // namespace v8 6928 } // namespace v8
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698