
Diff: src/heap/spaces.cc

Issue 1421583002: Reland "[heap] Divide available memory upon compaction tasks" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: s/+1/+kPointerSize/ Created 5 years, 2 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slots-buffer.h"
(...skipping 964 matching lines...)
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


+void PagedSpace::AddMemory(Address start, intptr_t size) {
+  accounting_stats_.ExpandSpace(static_cast<int>(size));
+  Free(start, static_cast<int>(size));
+}
+
+
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
+  if (free_space != nullptr) {
+    accounting_stats_.DecreaseCapacity(free_space->size());
+  }
+  return free_space;
+}
+
+
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+                                            int num, intptr_t limit) {
+  DCHECK_GT(num, 0);
+  DCHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+
+  bool memory_available = true;
+  bool spaces_need_memory = true;
+  FreeSpace* node = nullptr;
+  CompactionSpace* current_space = nullptr;
+  // Iterate over spaces and memory as long as we have memory and there are
+  // spaces in need of some.
+  while (memory_available && spaces_need_memory) {
+    spaces_need_memory = false;
+    // Round-robin over all spaces.
+    for (int i = 0; i < num; i++) {
+      current_space = other[i]->Get(identity());
+      if (current_space->free_list()->Available() < limit) {
+        // Space has not reached its limit. Try to get some memory.
+        spaces_need_memory = true;
+        node = TryRemoveMemory(limit - current_space->free_list()->Available());
+        if (node != nullptr) {
+          CHECK(current_space->identity() == identity());
+          current_space->AddMemory(node->address(), node->size());
+        } else {
+          memory_available = false;
+          break;
+        }
+      }
+    }
+  }
+}
+
+
+void PagedSpace::RefillFreeList() {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  FreeList* free_list = nullptr;
+  if (this == heap()->old_space()) {
+    free_list = collector->free_list_old_space().get();
+  } else if (this == heap()->code_space()) {
+    free_list = collector->free_list_code_space().get();
+  } else if (this == heap()->map_space()) {
+    free_list = collector->free_list_map_space().get();
+  } else {
+    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
+    // generation spaces out.
+    return;
+  }
+  DCHECK(free_list != nullptr);
+  intptr_t added = free_list_.Concatenate(free_list);
+  accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void CompactionSpace::RefillFreeList() {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  FreeList* free_list = nullptr;
+  if (identity() == OLD_SPACE) {
+    free_list = collector->free_list_old_space().get();
+  } else if (identity() == CODE_SPACE) {
+    free_list = collector->free_list_code_space().get();
+  } else {
+    // Compaction spaces only represent old or code space.
+    UNREACHABLE();
+  }
+  DCHECK(free_list != nullptr);
+  intptr_t refilled = 0;
+  while (refilled < kCompactionMemoryWanted) {
+    FreeSpace* node =
+        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
+    if (node == nullptr) return;
+    refilled += node->size();
+    AddMemory(node->address(), node->size());
+  }
+}
+
+
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
  DCHECK(identity() == other->identity());
  // Destroy the linear allocation space of {other}. This is needed to
  // (a) not waste the memory and
  // (b) keep the rest of the chunk in an iterable state (filler is needed).
  other->EmptyAllocationInfo();

  // Move over the free list. Concatenate makes sure that the source free list
  // gets properly reset after moving over all nodes.
  intptr_t added = free_list_.Concatenate(other->free_list());

  // Moved memory is not recorded as allocated memory, but rather increases and
-  // decreases capacity of the corresponding spaces. Used size and waste size
-  // are maintained by the receiving space upon allocating and freeing blocks.
+  // decreases capacity of the corresponding spaces.
  other->accounting_stats_.DecreaseCapacity(added);
  accounting_stats_.IncreaseCapacity(added);
}


void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  // Unmerged fields:
  //   area_size_
-  //   allocation_info_
-  //   end_of_unswept_pages_
-  //   unswept_free_bytes_
  //   anchor_

  MoveOverFreeMemory(other);

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();
+
+  // The linear allocation area of {other} should be destroyed now.
+  DCHECK(other->top() == nullptr);
+  DCHECK(other->limit() == nullptr);
+
+  DCHECK(other->end_of_unswept_pages_ == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();
    p->Unlink();
    p->set_owner(this);
(...skipping 1346 matching lines...)
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}


+FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
+  hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  FreeSpace* node = nullptr;
+  int node_size = 0;
+  // Try to find a node that fits exactly.
+  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
+  // If no node could be found, get as much memory as possible.
+  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+  if (node != nullptr) {
+    // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
+    // size larger than the minimum size required for FreeSpace, and (b) to
+    // get a block that can actually be freed into some FreeList later on.
+    if (hint_size_in_bytes <= kSmallListMin) {
+      hint_size_in_bytes = kSmallListMin + kPointerSize;
+    }
+    // Give back leftovers that were not required by {hint_size_in_bytes}.
+    intptr_t left_over = node_size - hint_size_in_bytes;
+
+    // Do not bother to return anything below {kSmallListMin} as it would be
+    // immediately discarded anyway.
+    if (left_over > kSmallListMin) {
+      Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
+      node->set_size(static_cast<int>(hint_size_in_bytes));
+    }
+  }
+  return node;
+}
+
+
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  DCHECK(0 < size_in_bytes);
  DCHECK(size_in_bytes <= kMaxBlockSize);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
(...skipping 208 matching lines...)


HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
-    collector->RefillFreeList(this);
+    RefillFreeList();

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;

    // If sweeping is still in progress try to sweep pages on the main thread.
    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
-    collector->RefillFreeList(this);
+    RefillFreeList();
    if (free_chunk >= size_in_bytes) {
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      // We should be able to allocate an object here since we just freed that
      // much memory.
      DCHECK(object != NULL);
      if (object != NULL) return object;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
(...skipping 509 matching lines...)
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8
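
The heart of this patch is the round-robin loop in PagedSpace::DivideUponCompactionSpaces, which hands free-list memory to the compaction spaces until either every space has reached its limit or the donor space runs out of memory. Below is a minimal, self-contained sketch of that distribution scheme, not V8 code: all types and sizes are invented for illustration, and the free list is reduced to a plain list of block sizes.

// Standalone illustration (not V8 code) of the round-robin distribution
// scheme in PagedSpace::DivideUponCompactionSpaces.
#include <cstdint>
#include <iostream>
#include <vector>

namespace {

// Donor "space": hands out at most one free block per request.
struct DonorSpace {
  std::vector<int64_t> free_blocks;

  // Returns the size of a removed block, or 0 if no memory is left.
  // Like the real TryRemoveMemory, it may return less than the hinted size.
  int64_t TryRemoveMemory(int64_t /*hint_in_bytes*/) {
    if (free_blocks.empty()) return 0;
    int64_t size = free_blocks.back();
    free_blocks.pop_back();
    return size;
  }
};

// Receiver "compaction space": only tracks how much memory it owns.
struct CompactionSpace {
  int64_t available = 0;
  void AddMemory(int64_t size) { available += size; }
};

// Round-robin: keep looping over all receivers, topping each one up until it
// reaches {limit}, and stop early once the donor runs out of memory.
void DivideUponCompactionSpaces(DonorSpace* donor,
                                std::vector<CompactionSpace>* spaces,
                                int64_t limit) {
  bool memory_available = true;
  bool spaces_need_memory = true;
  while (memory_available && spaces_need_memory) {
    spaces_need_memory = false;
    for (CompactionSpace& space : *spaces) {
      if (space.available >= limit) continue;  // This space is satisfied.
      spaces_need_memory = true;
      int64_t size = donor->TryRemoveMemory(limit - space.available);
      if (size == 0) {
        memory_available = false;  // Donor is empty; stop distributing.
        break;
      }
      space.AddMemory(size);
    }
  }
}

}  // namespace

int main() {
  DonorSpace donor{{64, 128, 32, 256, 512, 96}};  // Free blocks in bytes.
  std::vector<CompactionSpace> spaces(3);         // Three compaction tasks.
  DivideUponCompactionSpaces(&donor, &spaces, /*limit=*/300);
  for (size_t i = 0; i < spaces.size(); i++) {
    std::cout << "space " << i << ": " << spaces[i].available << " bytes\n";
  }
  return 0;
}

The two exit conditions mirror the patch: the outer loop ends either when no space asked for memory during a full pass (all have reached the limit) or when TryRemoveMemory fails to return a block. In the real code a limit of 0 means "no limit" and is replaced by std::numeric_limits<intptr_t>::max().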