Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1347873003: Revert of [heap] Introduce parallel compaction algorithm. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@counters-2nd-try
Patch Set: Created 5 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slots-buffer.h"
(...skipping 62 matching lines...)

 // -----------------------------------------------------------------------------
 // CodeRange


 CodeRange::CodeRange(Isolate* isolate)
     : isolate_(isolate),
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}


 bool CodeRange::SetUp(size_t requested) {
   DCHECK(code_range_ == NULL);

   if (requested == 0) {
     // When a target requires the code range feature, we put all code objects
     // in a kMaximalCodeRangeSize range of virtual address space, so that
     // they can call each other with near calls.
     if (kRequiresCodeRange) {
(...skipping 38 matching lines...)
     }
     base += kReservedCodeRangePages * base::OS::CommitPageSize();
   }
   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
   size_t size = code_range_->size() - (aligned_base - base) -
                 kReservedCodeRangePages * base::OS::CommitPageSize();
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;

   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  ReserveEmergencyBlock();
   return true;
 }


 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                        const FreeBlock* right) {
   // The entire point of CodeRange is that the difference between two
   // addresses in the range can be represented as a signed 32-bit int,
   // so the cast is semantically correct.
   return static_cast<int>(left->start - right->start);
(...skipping 115 matching lines...)
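The comment in CompareFreeBlockAddress states the invariant the cast depends on: both blocks live inside a single reserved code range kept well below 2 GB, so the difference of any two block addresses fits in a signed 32-bit int. A standalone sketch of that reasoning, with hypothetical names (not V8 code):

    #include <cassert>
    #include <cstdint>

    // left and right both lie in [base, base + range_size) with
    // range_size < 2 GB, so their difference fits in a signed 32-bit int.
    int CompareInRange(uintptr_t left, uintptr_t right) {
      const intptr_t diff =
          static_cast<intptr_t>(left) - static_cast<intptr_t>(right);
      assert(diff >= INT32_MIN && diff <= INT32_MAX);
      return static_cast<int>(diff);
    }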
   return true;
 }


 void CodeRange::ReleaseBlock(const FreeBlock* block) {
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(*block);
 }


+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}
+
+
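The two functions re-added above restore a reserve-up-front pattern that the reverted patch had removed: SetUp() parks one code-page-sized FreeBlock, and when the code range is otherwise exhausted during compaction the block is handed back to the free list so one last page allocation can still succeed. A simplified, self-contained sketch of the pattern (hypothetical types; the real FreeBlock, ReserveBlock, and ReleaseBlock are declared in spaces.h):

    #include <cstddef>
    #include <vector>

    struct Block { size_t start; size_t size; };

    class ReservingRange {
     public:
      explicit ReservingRange(size_t total) { free_.push_back({0, total}); }

      // SetUp-time step: carve the reserve out of the free pool once.
      void ReserveEmergency(size_t page_size) {
        if (reserve_.size == 0 && !free_.empty() &&
            free_.back().size >= page_size) {
          reserve_ = {free_.back().start, page_size};
          free_.back().start += page_size;
          free_.back().size -= page_size;
        }
      }

      // Pressure-time step: make the reserve allocatable again.
      void ReleaseEmergency() {
        if (reserve_.size != 0) {
          free_.push_back(reserve_);
          reserve_ = {0, 0};
        }
      }

     private:
      std::vector<Block> free_;  // stand-in for the real free block list
      Block reserve_ = {0, 0};
    };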
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
(...skipping 178 matching lines...)
   chunk->area_end_ = area_end;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
   chunk->slots_buffer_ = NULL;
   chunk->skip_list_ = NULL;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->set_parallel_sweeping(SWEEPING_DONE);
-  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
   chunk->available_in_huge_free_list_ = 0;
   chunk->non_available_small_blocks_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
   chunk->SetFlag(WAS_SWEPT);
(...skipping 462 matching lines...)
               ObjectSpace::kObjectSpaceCodeSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);


 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
       unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL) {
+      end_of_unswept_pages_(NULL),
+      emergency_memory_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();

   allocation_info_.set_top(NULL);
   allocation_info_.set_limit(NULL);

   anchor_.InitializeAsAnchor(this);
 }


 bool PagedSpace::SetUp() { return true; }


 bool PagedSpace::HasBeenSetUp() { return true; }


 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
     heap()->isolate()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }


-void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
-  DCHECK(identity() == other->identity());
-  // Destroy the linear allocation space of {other}. This is needed to
-  // (a) not waste the memory and
-  // (b) keep the rest of the chunk in an iterable state (filler is needed).
-  other->EmptyAllocationInfo();
-
-  // Move over the free list. Concatenate makes sure that the source free list
-  // gets properly reset after moving over all nodes.
-  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
-  other->accounting_stats_.AllocateBytes(freed_bytes);
-  // We do not adjust accounting_stats_ for {this} as we treat the received
-  // memory as borrowed, i.e., the originating space keeps track of its
-  // capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly
-  // maintained by allocating and freeing blocks.
-}
-
-
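For context on the helper deleted above: per its comments, FreeList::Concatenate moves every node from the source list into the receiver, resets the source, and returns the number of bytes moved; the helper then booked those bytes as allocated in the originating space so that capacity tracking stayed with the owner. A toy model of that contract (hypothetical type, not the V8 class):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Toy model: move all nodes from |other| into this list, reset |other|,
    // and report how many bytes moved.
    struct ToyFreeList {
      std::vector<std::pair<size_t, size_t>> nodes;  // (start, size)

      size_t Concatenate(ToyFreeList* other) {
        size_t moved = 0;
        for (const auto& node : other->nodes) moved += node.second;
        nodes.insert(nodes.end(), other->nodes.begin(), other->nodes.end());
        other->nodes.clear();  // the source list is properly reset
        return moved;
      }
    };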
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
   //   allocation_info_
+  //   emergency_memory_
   //   end_of_unswept_pages_
   //   unswept_free_bytes_
   //   anchor_

-  MoveOverFreeMemory(other);
+  // It only makes sense to merge compatible spaces.
+  DCHECK(identity() == other->identity());
+
+  // Destroy the linear allocation space of {other}. This is needed to (a) not
+  // waste the memory and (b) keep the rest of the chunk in an iterable state
+  // (filler is needed).
+  int linear_size = static_cast<int>(other->limit() - other->top());
+  other->Free(other->top(), linear_size);
+
+  // Move over the free list.
+  free_list_.Concatenate(other->free_list());

   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();

   // Move over pages.
   PageIterator it(other);
   Page* p = nullptr;
   while (it.has_next()) {
     p = it.next();
     p->Unlink();
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
   }
(...skipping 63 matching lines...)
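The page-moving loop at the end of MergeCompactionSpace is circular doubly-linked-list surgery: each page is unlinked from the compaction space's ring and relinked just before the receiving space's anchor, i.e. appended at the tail. A minimal sketch of the two list operations (hypothetical Node type, not V8's Page):

    // Pages sit on a circular doubly linked list headed by a sentinel anchor.
    struct Node {
      Node* prev;
      Node* next;
      Node() : prev(this), next(this) {}  // a fresh node forms its own ring

      void Unlink() {
        prev->next = next;
        next->prev = prev;
        prev = next = this;
      }

      // Splice this node into pos's ring, immediately after |pos|.
      void InsertAfter(Node* pos) {
        next = pos->next;
        prev = pos;
        pos->next->prev = this;
        pos->next = this;
      }
    };

Calling InsertAfter(anchor.prev) appends at the tail of the ring, which is exactly what p->InsertAfter(anchor_.prev_page()) does above.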
   if (!CanExpand(size)) return false;

   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;

   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
+  DCHECK(heap()->CommittedOldGenerationMemory() <=
+         heap()->MaxOldGenerationSize() +
+             PagedSpace::MaxEmergencyMemoryAllocated());

   p->InsertAfter(anchor_.prev_page());

   return true;
 }


 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
(...skipping 49 matching lines...)
     page->Unlink();
   }

   heap()->QueueMemoryChunkForFree(page);

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }


+intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
+  // New space and large object space.
+  static const int spaces_without_emergency_memory = 2;
+  static const int spaces_with_emergency_memory =
+      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
+  return Page::kPageSize * spaces_with_emergency_memory;
+}
+
+
+void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    CodeRange* code_range = heap()->isolate()->code_range();
+    if (code_range != NULL && code_range->valid()) {
+      code_range->ReleaseEmergencyBlock();
+    }
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
+  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
+      AreaSize(), AreaSize(), executable(), this);
+}
+
+
+void PagedSpace::FreeEmergencyMemory() {
+  Page* page = static_cast<Page*>(emergency_memory_);
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+  heap()->isolate()->memory_allocator()->Free(page);
+  emergency_memory_ = NULL;
+}
+
+
+void PagedSpace::UseEmergencyMemory() {
+  // Page::Initialize makes the chunk into a real page and adds it to the
+  // accounting for this space. Unlike PagedSpace::Expand, we don't check
+  // CanExpand first, so we can go over the limits a little here. That's OK,
+  // because we are in the process of compacting which will free up at least as
+  // much memory as it allocates.
+  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
+  page->InsertAfter(anchor_.prev_page());
+  emergency_memory_ = NULL;
+}
+
+
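Taken together, the restored PagedSpace functions give each compactable space a one-page safety reserve. Assuming the five allocation spaces of this era of V8 (new, old, code, map, large object), with the two named in the comment excluded, MaxEmergencyMemoryAllocated() comes to 3 * Page::kPageSize, which is exactly the slack the relaxed DCHECK in PagedSpace::Expand allows for. A hypothetical driver showing the intended call sequence around a compaction cycle (the real caller is the mark-compact collector, which is not part of this diff; RunCompaction is invented for the sketch):

    // Hypothetical sketch: reserve a page before compacting, then either
    // promote it to a real page (on out-of-memory) or hand it back.
    void CompactWithEmergencyReserve(PagedSpace* space) {
      space->CreateEmergencyMemory();  // park one page up front
      const bool out_of_memory = RunCompaction(space);  // hypothetical
      if (out_of_memory) {
        // Evacuation needs one more page: turn the reserve into a real page.
        space->UseEmergencyMemory();
      } else {
        // Reserve unused: return the chunk to the memory allocator.
        space->FreeEmergencyMemory();
      }
    }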
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif

 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
(...skipping 1944 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8