Chromium Code Reviews

Side by Side Diff: src/heap/spaces.cc

Issue 1382003002: [heap] Divide available memory upon compaction tasks (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@counters
Patch Set: Rebase + addressed comment Created 5 years, 2 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include "src/base/bits.h" 7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h" 8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen/full-codegen.h" 9 #include "src/full-codegen/full-codegen.h"
10 #include "src/heap/slots-buffer.h" 10 #include "src/heap/slots-buffer.h"
(...skipping 964 matching lines...)
975 PageIterator iterator(this); 975 PageIterator iterator(this);
976 while (iterator.has_next()) { 976 while (iterator.has_next()) {
977 heap()->isolate()->memory_allocator()->Free(iterator.next()); 977 heap()->isolate()->memory_allocator()->Free(iterator.next());
978 } 978 }
979 anchor_.set_next_page(&anchor_); 979 anchor_.set_next_page(&anchor_);
980 anchor_.set_prev_page(&anchor_); 980 anchor_.set_prev_page(&anchor_);
981 accounting_stats_.Clear(); 981 accounting_stats_.Clear();
982 } 982 }
983 983
984 984
985 void PagedSpace::AddMemory(Address start, intptr_t size) {
986 accounting_stats_.ExpandSpace(static_cast<int>(size));
987 Free(start, static_cast<int>(size));
988 }
989
990
991 FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
992 FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
993 if (free_space != nullptr) {
994 accounting_stats_.DecreaseCapacity(free_space->size());
995 }
996 return free_space;
997 }
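
The two helpers above form a symmetric pair: AddMemory grows the space's capacity and immediately puts the block on the free list, while TryRemoveMemory shrinks capacity by the size of the node it hands out. A minimal standalone sketch of that bookkeeping (all names are hypothetical stand-ins, not the V8 types):

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the space's accounting stats.
struct AccountingStats {
  intptr_t capacity = 0;
  void ExpandSpace(intptr_t size) { capacity += size; }
  void DecreaseCapacity(intptr_t size) { capacity -= size; }
};

int main() {
  AccountingStats stats;
  stats.ExpandSpace(4096);       // AddMemory: block becomes usable capacity.
  stats.DecreaseCapacity(4096);  // TryRemoveMemory: node leaves the space.
  // Moving memory between spaces is capacity-neutral overall; it is never
  // counted as allocation.
  assert(stats.capacity == 0);
  return 0;
}
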
998
999
1000 void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
1001 int num, intptr_t limit) {
1002 DCHECK_GT(num, 0);
1003 DCHECK(other != nullptr);
1004
1005 if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
1006
1007 EmptyAllocationInfo();
1008
1009 bool memory_available = true;
1010 bool spaces_need_memory = true;
1011 FreeSpace* node = nullptr;
1012 CompactionSpace* current_space = nullptr;
1013 // Iterate over spaces and memory as long as we have memory and there are
1014 // spaces in need of some.
1015 while (memory_available && spaces_need_memory) {
1016 spaces_need_memory = false;
1017 // Round-robin over all spaces.
1018 for (int i = 0; i < num; i++) {
1019 current_space = other[i]->Get(identity());
1020 if (current_space->free_list()->available() < limit) {
1021 // Space has not reached its limit. Try to get some memory.
1022 spaces_need_memory = true;
1023 node = TryRemoveMemory(limit - current_space->free_list()->available());
1024 if (node != nullptr) {
1025 CHECK(current_space->identity() == identity());
1026 current_space->AddMemory(node->address(), node->size());
1027 } else {
1028 memory_available = false;
1029 break;
1030 }
1031 }
1032 }
1033 }
1034 }
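
DivideUponCompactionSpaces round-robins over the compaction spaces, topping each one up toward {limit} until either every space holds enough memory or the main space runs dry. A simplified, self-contained model of that loop (hypothetical names; note the real code hands out whole free-list nodes via TryRemoveMemory, so a space can overshoot its limit, and removal can fail due to fragmentation even when some memory remains):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  intptr_t pool = 1000;                // free memory in the main space
  const intptr_t limit = 300;          // per-space target
  std::vector<intptr_t> spaces(3, 0);  // available() of three compaction spaces

  bool memory_available = true;
  bool spaces_need_memory = true;
  // Iterate over spaces and memory as long as we have memory and there are
  // spaces in need of some.
  while (memory_available && spaces_need_memory) {
    spaces_need_memory = false;
    for (intptr_t& available : spaces) {
      if (available < limit) {
        spaces_need_memory = true;
        // Stand-in for TryRemoveMemory(limit - available): grant at most
        // what the pool still holds.
        intptr_t granted = std::min(limit - available, pool);
        if (granted == 0) {
          memory_available = false;
          break;
        }
        pool -= granted;
        available += granted;
      }
    }
  }
  for (intptr_t a : spaces)
    std::printf("space received %lld bytes\n", static_cast<long long>(a));
  return 0;
}
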
1035
1036
1037 void PagedSpace::RefillFreeList() {
1038 MarkCompactCollector* collector = heap()->mark_compact_collector();
1039 FreeList* free_list = nullptr;
1040 if (this == heap()->old_space()) {
1041 free_list = collector->free_list_old_space().get();
1042 } else if (this == heap()->code_space()) {
1043 free_list = collector->free_list_code_space().get();
1044 } else if (this == heap()->map_space()) {
1045 free_list = collector->free_list_map_space().get();
1046 } else {
1047 // Any PagedSpace might invoke RefillFreeList. We filter all but our old
1048 // generation spaces out.
1049 return;
1050 }
1051 DCHECK(free_list != nullptr);
1052 intptr_t added = free_list_.Concatenate(free_list);
1053 accounting_stats_.IncreaseCapacity(added);
1054 }
1055
1056
1057 void CompactionSpace::RefillFreeList() {
1058 MarkCompactCollector* collector = heap()->mark_compact_collector();
1059 FreeList* free_list = nullptr;
1060 if (identity() == OLD_SPACE) {
1061 free_list = collector->free_list_old_space().get();
1062 } else if (identity() == CODE_SPACE) {
1063 free_list = collector->free_list_code_space().get();
1064 } else {
1065 // Compaction spaces only represent old or code space.
1066 UNREACHABLE();
1067 }
1068 DCHECK(free_list != nullptr);
1069 intptr_t refilled = 0;
1070 while (refilled < kCompactionMemoryWanted) {
1071 FreeSpace* node =
1072 free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
1073 if (node == nullptr) return;
1074 refilled += node->size();
1075 AddMemory(node->address(), node->size());
1076 }
1077 }
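
Unlike PagedSpace::RefillFreeList above, which concatenates the whole per-space free list from the collector, the compaction-space variant pulls nodes in a loop until it has accumulated roughly kCompactionMemoryWanted bytes, stopping early once the list runs dry. A standalone sketch of that bounded-refill pattern (hypothetical types; the constant's value is only assumed here):

#include <cstdint>
#include <deque>
#include <optional>

constexpr intptr_t kCompactionMemoryWanted = 500 * 1024;  // value assumed

struct Node {
  intptr_t size;
};

// Stand-in for FreeList::TryRemoveMemory(): hand out the next node, or
// nothing once the list is exhausted. Ignores the size hint for brevity.
std::optional<Node> TryRemoveMemory(std::deque<Node>& list,
                                    intptr_t /*hint_size_in_bytes*/) {
  if (list.empty()) return std::nullopt;
  Node node = list.front();
  list.pop_front();
  return node;
}

int main() {
  std::deque<Node> free_list = {{256 * 1024}, {128 * 1024}, {512 * 1024}};
  intptr_t refilled = 0;
  while (refilled < kCompactionMemoryWanted) {
    std::optional<Node> node =
        TryRemoveMemory(free_list, kCompactionMemoryWanted - refilled);
    if (!node) break;        // nothing left to take
    refilled += node->size;  // AddMemory(node->address(), node->size())
  }
  return 0;
}
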
1078
1079
985 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) { 1080 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
986 DCHECK(identity() == other->identity()); 1081 DCHECK(identity() == other->identity());
987 // Destroy the linear allocation space of {other}. This is needed to 1082 // Destroy the linear allocation space of {other}. This is needed to
988 // (a) not waste the memory and 1083 // (a) not waste the memory and
989 // (b) keep the rest of the chunk in an iterable state (filler is needed). 1084 // (b) keep the rest of the chunk in an iterable state (filler is needed).
990 other->EmptyAllocationInfo(); 1085 other->EmptyAllocationInfo();
991 1086
992 // Move over the free list. Concatenate makes sure that the source free list 1087 // Move over the free list. Concatenate makes sure that the source free list
993 // gets properly reset after moving over all nodes. 1088 // gets properly reset after moving over all nodes.
994 intptr_t added = free_list_.Concatenate(other->free_list()); 1089 intptr_t added = free_list_.Concatenate(other->free_list());
995 1090
996 // Moved memory is not recorded as allocated memory, but rather increases and 1091 // Moved memory is not recorded as allocated memory, but rather increases and
997 // decreases capacity of the corresponding spaces. Used size and waste size 1092 // decreases capacity of the corresponding spaces.
998 // are maintained by the receiving space upon allocating and freeing blocks.
999 other->accounting_stats_.DecreaseCapacity(added); 1093 other->accounting_stats_.DecreaseCapacity(added);
1000 accounting_stats_.IncreaseCapacity(added); 1094 accounting_stats_.IncreaseCapacity(added);
1001 } 1095 }
1002 1096
1003 1097
1004 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { 1098 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1005 // Unmerged fields: 1099 // Unmerged fields:
1006 // area_size_ 1100 // area_size_
1007 // allocation_info_
1008 // end_of_unswept_pages_
1009 // unswept_free_bytes_
1010 // anchor_ 1101 // anchor_
1011 1102
1012 MoveOverFreeMemory(other); 1103 MoveOverFreeMemory(other);
1013 1104
1014 // Update and clear accounting statistics. 1105 // Update and clear accounting statistics.
1015 accounting_stats_.Merge(other->accounting_stats_); 1106 accounting_stats_.Merge(other->accounting_stats_);
1016 other->accounting_stats_.Reset(); 1107 other->accounting_stats_.Clear();
1108
1109 // The linear allocation area of {other} should be destroyed now.
1110 DCHECK(other->top() == nullptr);
1111 DCHECK(other->limit() == nullptr);
1112
1113 DCHECK(other->end_of_unswept_pages_ == nullptr);
1017 1114
1018 AccountCommitted(other->CommittedMemory()); 1115 AccountCommitted(other->CommittedMemory());
1019 1116
1020 // Move over pages. 1117 // Move over pages.
1021 PageIterator it(other); 1118 PageIterator it(other);
1022 Page* p = nullptr; 1119 Page* p = nullptr;
1023 while (it.has_next()) { 1120 while (it.has_next()) {
1024 p = it.next(); 1121 p = it.next();
1025 p->Unlink(); 1122 p->Unlink();
1026 p->set_owner(this); 1123 p->set_owner(this);
(...skipping 1358 matching lines...)
2385 page = Page::FromAddress(node->address()); 2482 page = Page::FromAddress(node->address());
2386 page->add_available_in_large_free_list(-(*node_size)); 2483 page->add_available_in_large_free_list(-(*node_size));
2387 } 2484 }
2388 } 2485 }
2389 2486
2390 DCHECK(IsVeryLong() || available() == SumFreeLists()); 2487 DCHECK(IsVeryLong() || available() == SumFreeLists());
2391 return node; 2488 return node;
2392 } 2489 }
2393 2490
2394 2491
2492 FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
2493 base::LockGuard<base::Mutex> guard(&mutex_);
2494 FreeSpace* node = nullptr;
2495 int node_size = 0;
2496 // Try to find a node that fits exactly.
2497 node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
2498 // If no node could be found, get as much memory as possible.
2499 if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
2500 if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
2501 if (node != nullptr) {
2502 // Give back leftovers that were not required by {hint_size_in_bytes}.
2503 intptr_t aligned_size = RoundUp(hint_size_in_bytes, kPointerSize);
2504 intptr_t left_over = node_size - aligned_size;
2505 if (left_over > 0) {
2506 Free(node->address() + aligned_size, static_cast<int>(left_over));
2507 node->set_size(static_cast<int>(aligned_size));
2508 }
2509 }
2510 return node;
2511 }
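
FreeList::TryRemoveMemory treats the requested size as a hint: it takes whichever node it finds, rounds the hint up to pointer alignment, and returns any surplus tail of the node to the free list. The split arithmetic, as a standalone sketch (hypothetical names, no real V8 calls):

#include <cstdint>
#include <cstdio>

constexpr intptr_t kPointerSize = sizeof(void*);

// Round {value} up to the next multiple of {alignment} (a power of two).
intptr_t RoundUp(intptr_t value, intptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const intptr_t hint_size_in_bytes = 1000;  // what the caller asked for
  const intptr_t node_size = 4096;           // what the free list produced

  intptr_t aligned_size = RoundUp(hint_size_in_bytes, kPointerSize);
  intptr_t left_over = node_size - aligned_size;
  if (left_over > 0) {
    // In the real code, Free(node->address() + aligned_size, left_over)
    // puts the tail back on the list and node->set_size(aligned_size)
    // shrinks the node that is handed out.
    std::printf("keep %lld bytes, give back %lld bytes\n",
                static_cast<long long>(aligned_size),
                static_cast<long long>(left_over));
  }
  return 0;
}
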
2512
2513
2395 // Allocation on the old space free list. If it succeeds then a new linear 2514 // Allocation on the old space free list. If it succeeds then a new linear
2396 // allocation space has been set up with the top and limit of the space. If 2515 // allocation space has been set up with the top and limit of the space. If
2397 // the allocation fails then NULL is returned, and the caller can perform a GC 2516 // the allocation fails then NULL is returned, and the caller can perform a GC
2398 // or allocate a new page before retrying. 2517 // or allocate a new page before retrying.
2399 HeapObject* FreeList::Allocate(int size_in_bytes) { 2518 HeapObject* FreeList::Allocate(int size_in_bytes) {
2400 DCHECK(0 < size_in_bytes); 2519 DCHECK(0 < size_in_bytes);
2401 DCHECK(size_in_bytes <= kMaxBlockSize); 2520 DCHECK(size_in_bytes <= kMaxBlockSize);
2402 DCHECK(IsAligned(size_in_bytes, kPointerSize)); 2521 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2403 // Don't free list allocate if there is linear space available. 2522 // Don't free list allocate if there is linear space available.
2404 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); 2523 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
(...skipping 215 matching lines...)
2620 2739
2621 2740
2622 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { 2741 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2623 // Allocation in this space has failed. 2742 // Allocation in this space has failed.
2624 2743
2625 MarkCompactCollector* collector = heap()->mark_compact_collector(); 2744 MarkCompactCollector* collector = heap()->mark_compact_collector();
2626 // Sweeping is still in progress. 2745 // Sweeping is still in progress.
2627 if (collector->sweeping_in_progress()) { 2746 if (collector->sweeping_in_progress()) {
2628 // First try to refill the free-list, concurrent sweeper threads 2747 // First try to refill the free-list, concurrent sweeper threads
2629 // may have freed some objects in the meantime. 2748 // may have freed some objects in the meantime.
2630 collector->RefillFreeList(this); 2749 RefillFreeList();
2631 2750
2632 // Retry the free list allocation. 2751 // Retry the free list allocation.
2633 HeapObject* object = free_list_.Allocate(size_in_bytes); 2752 HeapObject* object = free_list_.Allocate(size_in_bytes);
2634 if (object != NULL) return object; 2753 if (object != NULL) return object;
2635 2754
2636 // If sweeping is still in progress try to sweep pages on the main thread. 2755 // If sweeping is still in progress try to sweep pages on the main thread.
2637 int free_chunk = collector->SweepInParallel(this, size_in_bytes); 2756 int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2638 collector->RefillFreeList(this); 2757 RefillFreeList();
2639 if (free_chunk >= size_in_bytes) { 2758 if (free_chunk >= size_in_bytes) {
2640 HeapObject* object = free_list_.Allocate(size_in_bytes); 2759 HeapObject* object = free_list_.Allocate(size_in_bytes);
2641 // We should be able to allocate an object here since we just freed that 2760 // We should be able to allocate an object here since we just freed that
2642 // much memory. 2761 // much memory.
2643 DCHECK(object != NULL); 2762 DCHECK(object != NULL);
2644 if (object != NULL) return object; 2763 if (object != NULL) return object;
2645 } 2764 }
2646 } 2765 }
2647 2766
2648 // Free list allocation failed and there is no next page. Fail if we have 2767 // Free list allocation failed and there is no next page. Fail if we have
(...skipping 509 matching lines...)
3158 object->ShortPrint(); 3277 object->ShortPrint();
3159 PrintF("\n"); 3278 PrintF("\n");
3160 } 3279 }
3161 printf(" --------------------------------------\n"); 3280 printf(" --------------------------------------\n");
3162 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3281 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3163 } 3282 }
3164 3283
3165 #endif // DEBUG 3284 #endif // DEBUG
3166 } // namespace internal 3285 } // namespace internal
3167 } // namespace v8 3286 } // namespace v8