Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1406533002: Revert of [heap] Divide available memory upon compaction tasks (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@counters
Patch Set: Created 5 years, 2 months ago
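
For orientation: the CL being reverted let the main thread divide its free-list memory round-robin among compaction tasks, visible below in the removed PagedSpace::DivideUponCompactionSpaces. A minimal standalone sketch of that distribution scheme follows; all names here are hypothetical stand-ins, not the V8 API, and handing out exact byte counts is a simplification of TryRemoveMemory's best-effort node lookup.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

struct SpaceSketch {
  intptr_t available = 0;  // Free-list bytes this space already holds.
};

// Hand out bytes from a donor budget until every space has reached `limit`
// or the donor runs dry; returns the total number of bytes distributed.
intptr_t DivideUponSpaces(std::vector<SpaceSketch>* spaces,
                          intptr_t donor_available, intptr_t limit) {
  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
  intptr_t distributed = 0;
  bool memory_available = true;
  bool spaces_need_memory = true;
  while (memory_available && spaces_need_memory) {
    spaces_need_memory = false;
    for (SpaceSketch& space : *spaces) {       // Round-robin over all spaces.
      if (space.available >= limit) continue;  // This space is satisfied.
      spaces_need_memory = true;
      intptr_t granted = std::min(limit - space.available, donor_available);
      if (granted == 0) {  // Donor is out of memory; stop distributing.
        memory_available = false;
        break;
      }
      donor_available -= granted;
      space.available += granted;
      distributed += granted;
    }
  }
  return distributed;
}

Termination mirrors the real loop: it stops once every space has reached its limit or the donor has run out of memory.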
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slots-buffer.h"
(...skipping 964 matching lines...)
   PageIterator iterator(this);
   while (iterator.has_next()) {
     heap()->isolate()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }


-void PagedSpace::AddMemory(Address start, intptr_t size) {
-  accounting_stats_.ExpandSpace(static_cast<int>(size));
-  Free(start, static_cast<int>(size));
-}
-
-
-FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
-  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
-  if (free_space != nullptr) {
-    accounting_stats_.DecreaseCapacity(free_space->size());
-  }
-  return free_space;
-}
-
-
-void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
-                                            int num, intptr_t limit) {
-  DCHECK_GT(num, 0);
-  DCHECK(other != nullptr);
-
-  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
-  EmptyAllocationInfo();
-
-  bool memory_available = true;
-  bool spaces_need_memory = true;
-  FreeSpace* node = nullptr;
-  CompactionSpace* current_space = nullptr;
-  // Iterate over spaces and memory as long as we have memory and there are
-  // spaces in need of some.
-  while (memory_available && spaces_need_memory) {
-    spaces_need_memory = false;
-    // Round-robin over all spaces.
-    for (int i = 0; i < num; i++) {
-      current_space = other[i]->Get(identity());
-      if (current_space->free_list()->available() < limit) {
-        // Space has not reached its limit. Try to get some memory.
-        spaces_need_memory = true;
-        node = TryRemoveMemory(limit - current_space->free_list()->available());
-        if (node != nullptr) {
-          CHECK(current_space->identity() == identity());
-          current_space->AddMemory(node->address(), node->size());
-        } else {
-          memory_available = false;
-          break;
-        }
-      }
-    }
-  }
-}
-
-
-void PagedSpace::RefillFreeList() {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (this == heap()->old_space()) {
-    free_list = collector->free_list_old_space().get();
-  } else if (this == heap()->code_space()) {
-    free_list = collector->free_list_code_space().get();
-  } else if (this == heap()->map_space()) {
-    free_list = collector->free_list_map_space().get();
-  } else {
-    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
-    // generation spaces out.
-    return;
-  }
-  DCHECK(free_list != nullptr);
-  intptr_t added = free_list_.Concatenate(free_list);
-  accounting_stats_.IncreaseCapacity(added);
-}
-
-
-void CompactionSpace::RefillFreeList() {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (identity() == OLD_SPACE) {
-    free_list = collector->free_list_old_space().get();
-  } else if (identity() == CODE_SPACE) {
-    free_list = collector->free_list_code_space().get();
-  } else {
-    // Compaction spaces only represent old or code space.
-    UNREACHABLE();
-  }
-  DCHECK(free_list != nullptr);
-  intptr_t refilled = 0;
-  while (refilled < kCompactionMemoryWanted) {
-    FreeSpace* node =
-        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
-    if (node == nullptr) return;
-    refilled += node->size();
-    AddMemory(node->address(), node->size());
-  }
-}
-
-
 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
   //   (a) not waste the memory and
   //   (b) keep the rest of the chunk in an iterable state (filler is needed).
   other->EmptyAllocationInfo();

   // Move over the free list. Concatenate makes sure that the source free list
   // gets properly reset after moving over all nodes.
   intptr_t added = free_list_.Concatenate(other->free_list());

   // Moved memory is not recorded as allocated memory, but rather increases and
-  // decreases capacity of the corresponding spaces.
+  // decreases capacity of the corresponding spaces. Used size and waste size
+  // are maintained by the receiving space upon allocating and freeing blocks.
   other->accounting_stats_.DecreaseCapacity(added);
   accounting_stats_.IncreaseCapacity(added);
 }


 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
+  //   allocation_info_
+  //   end_of_unswept_pages_
+  //   unswept_free_bytes_
   //   anchor_

   MoveOverFreeMemory(other);

   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
-
-  // The linear allocation area of {other} should be destroyed now.
-  DCHECK(other->top() == nullptr);
-  DCHECK(other->limit() == nullptr);
-
-  DCHECK(other->end_of_unswept_pages_ == nullptr);
+  other->accounting_stats_.Reset();

   AccountCommitted(other->CommittedMemory());

   // Move over pages.
   PageIterator it(other);
   Page* p = nullptr;
   while (it.has_next()) {
     p = it.next();
     p->Unlink();
     p->set_owner(this);
(...skipping 1358 matching lines...)
       page = Page::FromAddress(node->address());
       page->add_available_in_large_free_list(-(*node_size));
     }
   }

   DCHECK(IsVeryLong() || available() == SumFreeLists());
   return node;
 }


-FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  FreeSpace* node = nullptr;
-  int node_size = 0;
-  // Try to find a node that fits exactly.
-  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
-  // If no node could be found get as much memory as possible.
-  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
-  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
-  if (node != nullptr) {
-    // Give back left overs that were not required by {size_in_bytes}.
-    intptr_t aligned_size = RoundUp(hint_size_in_bytes, kPointerSize);
-    intptr_t left_over = node_size - aligned_size;
-    if (left_over > 0) {
-      Free(node->address() + aligned_size, static_cast<int>(left_over));
-      node->set_size(static_cast<int>(aligned_size));
-    }
-  }
-  return node;
-}
-
-
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
 // or allocate a new page before retrying.
 HeapObject* FreeList::Allocate(int size_in_bytes) {
   DCHECK(0 < size_in_bytes);
   DCHECK(size_in_bytes <= kMaxBlockSize);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
   // Don't free list allocate if there is linear space available.
   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
(...skipping 215 matching lines...)
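
The removed FreeList::TryRemoveMemory above treats its argument as a hint: it first looks for an exact fit, falls back to the huge and large lists, and finally trims any surplus back into the free list. A small sketch of that final trim step, with a simplified node type (NodeSketch and TrimToHint are hypothetical stand-ins):

#include <cstdint>

// Stand-in for V8's pointer-size constant.
constexpr intptr_t kPointerSize = sizeof(void*);

struct NodeSketch {
  intptr_t size;  // Usable bytes in this free-list node.
};

// Round `hint` up to pointer alignment, trim the node to that size, and
// report the surplus the caller should hand back to the free list.
intptr_t TrimToHint(NodeSketch* node, intptr_t hint) {
  intptr_t aligned = (hint + kPointerSize - 1) & ~(kPointerSize - 1);
  intptr_t left_over = node->size - aligned;
  if (left_over <= 0) return 0;  // Node is not larger than the hint.
  node->size = aligned;
  return left_over;
}

V8's RoundUp performs the same pointer-size alignment; the bit trick here assumes kPointerSize is a power of two.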


 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
-    RefillFreeList();
+    collector->RefillFreeList(this);

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;

     // If sweeping is still in progress try to sweep pages on the main thread.
     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
-    RefillFreeList();
+    collector->RefillFreeList(this);
     if (free_chunk >= size_in_bytes) {
       HeapObject* object = free_list_.Allocate(size_in_bytes);
       // We should be able to allocate an object here since we just freed that
       // much memory.
       DCHECK(object != NULL);
       if (object != NULL) return object;
     }
   }

   // Free list allocation failed and there is no next page. Fail if we have
(...skipping 509 matching lines...)
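
The SlowAllocateRaw hunk above follows a refill/retry/help-sweep/retry shape; note the DCHECK plus null-check pair, which asserts success in debug builds while still tolerating failure in release builds. A schematic of that control flow with simplified stand-in interfaces (not the actual V8 classes):

struct HeapObject;

struct FreeListIface {
  virtual HeapObject* Allocate(int size_in_bytes) = 0;
};

struct CollectorIface {
  virtual bool sweeping_in_progress() = 0;
  virtual void RefillFreeList(void* space) = 0;
  virtual int SweepInParallel(void* space, int size_in_bytes) = 0;
};

HeapObject* SlowAllocateSketch(CollectorIface* collector,
                               FreeListIface* free_list, void* space,
                               int size_in_bytes) {
  if (collector->sweeping_in_progress()) {
    // Concurrent sweepers may have freed memory since the fast path failed.
    collector->RefillFreeList(space);
    if (HeapObject* object = free_list->Allocate(size_in_bytes)) return object;

    // Help sweeping on the main thread, then refill and retry once more.
    int free_chunk = collector->SweepInParallel(space, size_in_bytes);
    collector->RefillFreeList(space);
    if (free_chunk >= size_in_bytes) return free_list->Allocate(size_in_bytes);
  }
  return nullptr;  // Caller can trigger a GC or grow the space and retry.
}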
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8