Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1412643002: Revert of "[heap] Divide available memory upon compaction tasks" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 2 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slots-buffer.h"
(...skipping 964 matching lines...)
   PageIterator iterator(this);
   while (iterator.has_next()) {
     heap()->isolate()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
 
 
-void PagedSpace::AddMemory(Address start, intptr_t size) {
-  accounting_stats_.ExpandSpace(static_cast<int>(size));
-  Free(start, static_cast<int>(size));
-}
-
-
-FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
-  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
-  if (free_space != nullptr) {
-    accounting_stats_.DecreaseCapacity(free_space->size());
-  }
-  return free_space;
-}
-
-
-void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
-                                            int num, intptr_t limit) {
-  DCHECK_GT(num, 0);
-  DCHECK(other != nullptr);
-
-  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
-  EmptyAllocationInfo();
-
-  bool memory_available = true;
-  bool spaces_need_memory = true;
-  FreeSpace* node = nullptr;
-  CompactionSpace* current_space = nullptr;
-  // Iterate over spaces and memory as long as we have memory and there are
-  // spaces in need of some.
-  while (memory_available && spaces_need_memory) {
-    spaces_need_memory = false;
-    // Round-robin over all spaces.
-    for (int i = 0; i < num; i++) {
-      current_space = other[i]->Get(identity());
-      if (current_space->free_list()->Available() < limit) {
-        // Space has not reached its limit. Try to get some memory.
-        spaces_need_memory = true;
-        node = TryRemoveMemory(limit - current_space->free_list()->Available());
-        if (node != nullptr) {
-          CHECK(current_space->identity() == identity());
-          current_space->AddMemory(node->address(), node->size());
-        } else {
-          memory_available = false;
-          break;
-        }
-      }
-    }
-  }
-}
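The function above (removed by this revert) hands out the donor space's free-list memory round-robin until every compaction space holds {limit} bytes or the donor is exhausted. A minimal standalone sketch of that policy, with a plain byte counter as the donor and a hypothetical TakeMemory() helper in place of PagedSpace::TryRemoveMemory(), which may likewise return less memory than was asked for:

    #include <cstdint>
    #include <limits>
    #include <vector>

    // Hypothetical stand-in: each space only tracks its free byte count.
    struct Space {
      intptr_t available = 0;
      void AddMemory(intptr_t size) { available += size; }
    };

    // Hands out up to {wanted} bytes from the donor; 0 once exhausted.
    intptr_t TakeMemory(intptr_t* donor, intptr_t wanted) {
      intptr_t taken = wanted < *donor ? wanted : *donor;
      *donor -= taken;
      return taken;
    }

    // Round-robin: keep topping up spaces until each has reached {limit}
    // or the donor runs dry (limit == 0 means "no limit").
    void DivideUponSpaces(std::vector<Space>* spaces, intptr_t* donor,
                          intptr_t limit) {
      if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
      bool memory_available = true;
      bool spaces_need_memory = true;
      while (memory_available && spaces_need_memory) {
        spaces_need_memory = false;
        for (Space& space : *spaces) {
          if (space.available < limit) {
            spaces_need_memory = true;
            intptr_t taken = TakeMemory(donor, limit - space.available);
            if (taken > 0) {
              space.AddMemory(taken);
            } else {
              memory_available = false;
              break;
            }
          }
        }
      }
    }

The outer while loop is what makes the round-robin matter: a single request may be satisfied only partially, so a space can still be under its limit after one pass.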
-
-
-void PagedSpace::RefillFreeList() {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (this == heap()->old_space()) {
-    free_list = collector->free_list_old_space().get();
-  } else if (this == heap()->code_space()) {
-    free_list = collector->free_list_code_space().get();
-  } else if (this == heap()->map_space()) {
-    free_list = collector->free_list_map_space().get();
-  } else {
-    // Any PagedSpace might invoke RefillFreeList; filter out all but the
-    // old-generation spaces.
-    return;
-  }
-  DCHECK(free_list != nullptr);
-  intptr_t added = free_list_.Concatenate(free_list);
-  accounting_stats_.IncreaseCapacity(added);
-}
-
-
-void CompactionSpace::RefillFreeList() {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (identity() == OLD_SPACE) {
-    free_list = collector->free_list_old_space().get();
-  } else if (identity() == CODE_SPACE) {
-    free_list = collector->free_list_code_space().get();
-  } else {
-    // Compaction spaces only represent old or code space.
-    UNREACHABLE();
-  }
-  DCHECK(free_list != nullptr);
-  intptr_t refilled = 0;
-  while (refilled < kCompactionMemoryWanted) {
-    FreeSpace* node =
-        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
-    if (node == nullptr) return;
-    refilled += node->size();
-    AddMemory(node->address(), node->size());
-  }
-}
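The refill loop above accumulates nodes of whatever size the shared free list can supply until roughly kCompactionMemoryWanted bytes have been gathered. The same accumulate-until-threshold pattern in isolation, with a hypothetical queue of node sizes standing in for the free list:

    #include <cstdint>
    #include <queue>

    // Pulls node sizes from a hypothetical source until {wanted} bytes
    // have accumulated or the source is empty (the analogue of
    // TryRemoveMemory() returning nullptr).
    intptr_t Refill(std::queue<intptr_t>* nodes, intptr_t wanted) {
      intptr_t refilled = 0;
      while (refilled < wanted && !nodes->empty()) {
        refilled += nodes->front();
        nodes->pop();
      }
      return refilled;
    }

Because the real TryRemoveMemory() trims nodes down to the remaining request (see its definition further below), the loop overshoots the target by at most one alignment step.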
-
-
 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
   // (a) not waste the memory and
   // (b) keep the rest of the chunk in an iterable state (filler is needed).
   other->EmptyAllocationInfo();
 
   // Move over the free list. Concatenate makes sure that the source free list
   // gets properly reset after moving over all nodes.
   intptr_t added = free_list_.Concatenate(other->free_list());
 
   // Moved memory is not recorded as allocated memory, but rather increases and
-  // decreases capacity of the corresponding spaces.
+  // decreases capacity of the corresponding spaces. Used size and waste size
+  // are maintained by the receiving space upon allocating and freeing blocks.
   other->accounting_stats_.DecreaseCapacity(added);
   accounting_stats_.IncreaseCapacity(added);
 }
 
 
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   // area_size_
+  // allocation_info_
+  // end_of_unswept_pages_
+  // unswept_free_bytes_
   // anchor_
 
   MoveOverFreeMemory(other);
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
-
-  // The linear allocation area of {other} should be destroyed now.
-  DCHECK(other->top() == nullptr);
-  DCHECK(other->limit() == nullptr);
-
-  DCHECK(other->end_of_unswept_pages_ == nullptr);
+  other->accounting_stats_.Reset();
 
   AccountCommitted(other->CommittedMemory());
 
   // Move over pages.
   PageIterator it(other);
   Page* p = nullptr;
   while (it.has_next()) {
     p = it.next();
     p->Unlink();
     p->set_owner(this);
(...skipping 1337 matching lines...)
       page = Page::FromAddress(node->address());
       page->add_available_in_large_free_list(-(*node_size));
     }
   }
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return node;
 }
 
 
-FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  FreeSpace* node = nullptr;
-  int node_size = 0;
-  // Try to find a node that fits exactly.
-  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
-  // If no node could be found, get as much memory as possible.
-  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
-  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
-  if (node != nullptr) {
-    // Give back leftovers that were not required by {hint_size_in_bytes}.
-    intptr_t aligned_size = RoundUp(hint_size_in_bytes, kPointerSize);
-    intptr_t left_over = node_size - aligned_size;
-    if (left_over > 0) {
-      Free(node->address() + aligned_size, static_cast<int>(left_over));
-      node->set_size(static_cast<int>(aligned_size));
-    }
-  }
-  return node;
-}
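The splitting step above keeps only a pointer-aligned prefix of the found node and frees the tail back to the list. The arithmetic in isolation, assuming a 64-bit target where kPointerSize is 8 (helper names here are illustrative, not V8 API):

    #include <cstdint>

    constexpr intptr_t kPointerSize = 8;  // 64-bit assumption

    // Rounds {value} up to a pointer-size multiple; works because
    // kPointerSize is a power of two.
    intptr_t RoundUpToPointerSize(intptr_t value) {
      return (value + kPointerSize - 1) & ~(kPointerSize - 1);
    }

    // For a request of {hint} bytes satisfied by a node of {node_size}
    // bytes, returns the trimmed node size; {*left_over} is what would be
    // handed back to the free list via Free().
    intptr_t TrimmedSize(intptr_t hint, intptr_t node_size,
                         intptr_t* left_over) {
      intptr_t aligned = RoundUpToPointerSize(hint);
      *left_over = node_size - aligned;
      if (*left_over > 0) return aligned;
      *left_over = 0;  // node not larger than the request; keep it whole
      return node_size;
    }

For example, a 100-byte hint served by a 256-byte node keeps 104 bytes (100 rounded up to a multiple of 8) and returns the remaining 152 bytes to the free list.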
-
-
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
 // or allocate a new page before retrying.
 HeapObject* FreeList::Allocate(int size_in_bytes) {
   DCHECK(0 < size_in_bytes);
   DCHECK(size_in_bytes <= kMaxBlockSize);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
   // Don't free list allocate if there is linear space available.
   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
(...skipping 214 matching lines...)
 
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
     // First try to refill the free list; concurrent sweeper threads
     // may have freed some objects in the meantime.
-    RefillFreeList();
+    collector->RefillFreeList(this);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
 
     // If sweeping is still in progress, try to sweep pages on the main thread.
     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
-    RefillFreeList();
+    collector->RefillFreeList(this);
     if (free_chunk >= size_in_bytes) {
       HeapObject* object = free_list_.Allocate(size_in_bytes);
       // We should be able to allocate an object here since we just freed that
       // much memory.
       DCHECK(object != NULL);
       if (object != NULL) return object;
     }
   }
 
   // Free list allocation failed and there is no next page. Fail if we have
(...skipping 509 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }
 
 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8
