Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 2493803002: [heap] Add basic infrastructure for Minor Mark-Compact collector (Closed)
Patch Set: Addressed comment (created 4 years, 1 month ago)
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/context-slot-cache.h" 9 #include "src/ast/context-slot-cache.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 268 matching lines...)
279 new_space_->Size()) { 279 new_space_->Size()) {
280 isolate_->counters() 280 isolate_->counters()
281 ->gc_compactor_caused_by_oldspace_exhaustion() 281 ->gc_compactor_caused_by_oldspace_exhaustion()
282 ->Increment(); 282 ->Increment();
283 *reason = "scavenge might not succeed"; 283 *reason = "scavenge might not succeed";
284 return MARK_COMPACTOR; 284 return MARK_COMPACTOR;
285 } 285 }
286 286
287 // Default 287 // Default
288 *reason = NULL; 288 *reason = NULL;
289 return SCAVENGER; 289 return YoungGenerationCollector();
290 } 290 }
291 291
292 292
293 // TODO(1238405): Combine the infrastructure for --heap-stats and 293 // TODO(1238405): Combine the infrastructure for --heap-stats and
294 // --log-gc to avoid the complicated preprocessor and flag testing. 294 // --log-gc to avoid the complicated preprocessor and flag testing.
295 void Heap::ReportStatisticsBeforeGC() { 295 void Heap::ReportStatisticsBeforeGC() {
296 // Heap::ReportHeapStatistics will also log NewSpace statistics when 296 // Heap::ReportHeapStatistics will also log NewSpace statistics when
297 // compiled --log-gc is set. The following logic is used to avoid 297 // compiled --log-gc is set. The following logic is used to avoid
298 // double logging. 298 // double logging.
299 #ifdef DEBUG 299 #ifdef DEBUG
(...skipping 515 matching lines...)
815 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE); 815 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
816 VMState<EXTERNAL> state(isolate_); 816 VMState<EXTERNAL> state(isolate_);
817 HandleScope handle_scope(isolate_); 817 HandleScope handle_scope(isolate_);
818 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); 818 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
819 } 819 }
820 } 820 }
821 } 821 }
822 822
823 823
824 HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) { 824 HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
825 if (collector == SCAVENGER) { 825 if (IsYoungGenerationCollector(collector)) {
826 return isolate_->counters()->gc_scavenger(); 826 return isolate_->counters()->gc_scavenger();
827 } else { 827 } else {
828 if (!incremental_marking()->IsStopped()) { 828 if (!incremental_marking()->IsStopped()) {
829 if (ShouldReduceMemory()) { 829 if (ShouldReduceMemory()) {
830 return isolate_->counters()->gc_finalize_reduce_memory(); 830 return isolate_->counters()->gc_finalize_reduce_memory();
831 } else { 831 } else {
832 return isolate_->counters()->gc_finalize(); 832 return isolate_->counters()->gc_finalize();
833 } 833 }
834 } else { 834 } else {
835 return isolate_->counters()->gc_compactor(); 835 return isolate_->counters()->gc_compactor();
(...skipping 109 matching lines...)
945 // Reset the allocation timeout to the GC interval, but make sure to 945 // Reset the allocation timeout to the GC interval, but make sure to
946 // allow at least a few allocations after a collection. The reason 946 // allow at least a few allocations after a collection. The reason
947 // for this is that we have a lot of allocation sequences and we 947 // for this is that we have a lot of allocation sequences and we
948 // assume that a garbage collection will allow the subsequent 948 // assume that a garbage collection will allow the subsequent
949 // allocation attempts to go through. 949 // allocation attempts to go through.
950 allocation_timeout_ = Max(6, FLAG_gc_interval); 950 allocation_timeout_ = Max(6, FLAG_gc_interval);
951 #endif 951 #endif
952 952
953 EnsureFillerObjectAtTop(); 953 EnsureFillerObjectAtTop();
954 954
955 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { 955 if (IsYoungGenerationCollector(collector) &&
956 !incremental_marking()->IsStopped()) {
956 if (FLAG_trace_incremental_marking) { 957 if (FLAG_trace_incremental_marking) {
957 isolate()->PrintWithTimestamp( 958 isolate()->PrintWithTimestamp(
958 "[IncrementalMarking] Scavenge during marking.\n"); 959 "[IncrementalMarking] Scavenge during marking.\n");
959 } 960 }
960 } 961 }
961 962
962 if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() && 963 if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
963 !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() && 964 !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
964 !incremental_marking()->should_hurry() && FLAG_incremental_marking && 965 !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
965 OldGenerationSpaceAvailable() <= 0) { 966 OldGenerationSpaceAvailable() <= 0) {
966 if (!incremental_marking()->IsComplete() && 967 if (!incremental_marking()->IsComplete() &&
967 !mark_compact_collector()->marking_deque()->IsEmpty() && 968 !mark_compact_collector()->marking_deque()->IsEmpty() &&
968 !FLAG_gc_global) { 969 !FLAG_gc_global) {
969 if (FLAG_trace_incremental_marking) { 970 if (FLAG_trace_incremental_marking) {
970 isolate()->PrintWithTimestamp( 971 isolate()->PrintWithTimestamp(
971 "[IncrementalMarking] Delaying MarkSweep.\n"); 972 "[IncrementalMarking] Delaying MarkSweep.\n");
972 } 973 }
973 collector = SCAVENGER; 974 collector = YoungGenerationCollector();
974 collector_reason = "incremental marking delaying mark-sweep"; 975 collector_reason = "incremental marking delaying mark-sweep";
975 } 976 }
976 } 977 }
977 978
978 bool next_gc_likely_to_collect_more = false; 979 bool next_gc_likely_to_collect_more = false;
979 intptr_t committed_memory_before = 0; 980 intptr_t committed_memory_before = 0;
980 981
981 if (collector == MARK_COMPACTOR) { 982 if (collector == MARK_COMPACTOR) {
982 committed_memory_before = CommittedOldGenerationMemory(); 983 committed_memory_before = CommittedOldGenerationMemory();
983 } 984 }
(...skipping 44 matching lines...)
1028 if (collector == MARK_COMPACTOR && 1029 if (collector == MARK_COMPACTOR &&
1029 (gc_callback_flags & (kGCCallbackFlagForced | 1030 (gc_callback_flags & (kGCCallbackFlagForced |
1030 kGCCallbackFlagCollectAllAvailableGarbage)) != 0) { 1031 kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1031 isolate()->CountUsage(v8::Isolate::kForcedGC); 1032 isolate()->CountUsage(v8::Isolate::kForcedGC);
1032 } 1033 }
1033 1034
1034 // Start incremental marking for the next cycle. The heap snapshot 1035 // Start incremental marking for the next cycle. The heap snapshot
1035 // generator needs incremental marking to stay off after it aborted. 1036 // generator needs incremental marking to stay off after it aborted.
1036 // We do this only for scavenger to avoid a loop where mark-compact 1037 // We do this only for scavenger to avoid a loop where mark-compact
1037 // causes another mark-compact. 1038 // causes another mark-compact.
1038 if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) { 1039 if (IsYoungGenerationCollector(collector) &&
1040 !ShouldAbortIncrementalMarking()) {
1039 StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags, 1041 StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
1040 kNoGCCallbackFlags); 1042 kNoGCCallbackFlags);
1041 } 1043 }
1042 1044
1043 return next_gc_likely_to_collect_more; 1045 return next_gc_likely_to_collect_more;
1044 } 1046 }
1045 1047
1046 1048
1047 int Heap::NotifyContextDisposed(bool dependant_context) { 1049 int Heap::NotifyContextDisposed(bool dependant_context) {
1048 if (!dependant_context) { 1050 if (!dependant_context) {
(...skipping 219 matching lines...)
1268 static_cast<double>(start_new_space_size) * 100); 1270 static_cast<double>(start_new_space_size) * 100);
1269 1271
1270 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; 1272 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1271 tracer()->AddSurvivalRatio(survival_rate); 1273 tracer()->AddSurvivalRatio(survival_rate);
1272 } 1274 }
1273 1275
1274 bool Heap::PerformGarbageCollection( 1276 bool Heap::PerformGarbageCollection(
1275 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { 1277 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1276 int freed_global_handles = 0; 1278 int freed_global_handles = 0;
1277 1279
1278 if (collector != SCAVENGER) { 1280 if (!IsYoungGenerationCollector(collector)) {
1279 PROFILE(isolate_, CodeMovingGCEvent()); 1281 PROFILE(isolate_, CodeMovingGCEvent());
1280 } 1282 }
1281 1283
1282 #ifdef VERIFY_HEAP 1284 #ifdef VERIFY_HEAP
1283 if (FLAG_verify_heap) { 1285 if (FLAG_verify_heap) {
1284 VerifyStringTable(this); 1286 VerifyStringTable(this);
1285 } 1287 }
1286 #endif 1288 #endif
1287 1289
1288 GCType gc_type = 1290 GCType gc_type =
(...skipping 10 matching lines...)
1299 } 1301 }
1300 } 1302 }
1301 1303
1302 EnsureFromSpaceIsCommitted(); 1304 EnsureFromSpaceIsCommitted();
1303 1305
1304 int start_new_space_size = static_cast<int>(Heap::new_space()->Size()); 1306 int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
1305 1307
1306 { 1308 {
1307 Heap::PretenuringScope pretenuring_scope(this); 1309 Heap::PretenuringScope pretenuring_scope(this);
1308 1310
1309 if (collector == MARK_COMPACTOR) { 1311 switch (collector) {
1310 UpdateOldGenerationAllocationCounter(); 1312 case MARK_COMPACTOR:
1311 // Perform mark-sweep with optional compaction. 1313 UpdateOldGenerationAllocationCounter();
1312 MarkCompact(); 1314 // Perform mark-sweep with optional compaction.
1313 old_generation_size_configured_ = true; 1315 MarkCompact();
1314 // This should be updated before PostGarbageCollectionProcessing, which 1316 old_generation_size_configured_ = true;
1315 // can cause another GC. Take into account the objects promoted during GC. 1317 // This should be updated before PostGarbageCollectionProcessing, which
1316 old_generation_allocation_counter_at_last_gc_ += 1318 // can cause another GC. Take into account the objects promoted during
1317 static_cast<size_t>(promoted_objects_size_); 1319 // GC.
1318 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); 1320 old_generation_allocation_counter_at_last_gc_ +=
1319 } else { 1321 static_cast<size_t>(promoted_objects_size_);
1320 Scavenge(); 1322 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1323 break;
1324 case MINOR_MARK_COMPACTOR:
1325 MinorMarkCompact();
1326 break;
1327 case SCAVENGER:
1328 Scavenge();
1329 break;
1321 } 1330 }
1322 1331
1323 ProcessPretenuringFeedback(); 1332 ProcessPretenuringFeedback();
1324 } 1333 }
1325 1334
1326 UpdateSurvivalStatistics(start_new_space_size); 1335 UpdateSurvivalStatistics(start_new_space_size);
1327 ConfigureInitialOldGenerationSize(); 1336 ConfigureInitialOldGenerationSize();
1328 1337
1329 isolate_->counters()->objs_since_last_young()->Set(0); 1338 isolate_->counters()->objs_since_last_young()->Set(0);
1330 1339
(...skipping 102 matching lines...)
1433 1442
1434 LOG(isolate_, ResourceEvent("markcompact", "end")); 1443 LOG(isolate_, ResourceEvent("markcompact", "end"));
1435 1444
1436 MarkCompactEpilogue(); 1445 MarkCompactEpilogue();
1437 1446
1438 if (FLAG_allocation_site_pretenuring) { 1447 if (FLAG_allocation_site_pretenuring) {
1439 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); 1448 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1440 } 1449 }
1441 } 1450 }
1442 1451
1452 void Heap::MinorMarkCompact() { UNREACHABLE(); }
1443 1453
1444 void Heap::MarkCompactEpilogue() { 1454 void Heap::MarkCompactEpilogue() {
1445 TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE); 1455 TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
1446 gc_state_ = NOT_IN_GC; 1456 gc_state_ = NOT_IN_GC;
1447 1457
1448 isolate_->counters()->objs_since_last_full()->Set(0); 1458 isolate_->counters()->objs_since_last_full()->Set(0);
1449 1459
1450 incremental_marking()->Epilogue(); 1460 incremental_marking()->Epilogue();
1451 1461
1452 PreprocessStackTraces(); 1462 PreprocessStackTraces();
(...skipping 5002 matching lines...)
6455 } 6465 }
6456 6466
6457 6467
6458 // static 6468 // static
6459 int Heap::GetStaticVisitorIdForMap(Map* map) { 6469 int Heap::GetStaticVisitorIdForMap(Map* map) {
6460 return StaticVisitorBase::GetVisitorId(map); 6470 return StaticVisitorBase::GetVisitorId(map);
6461 } 6471 }
6462 6472
6463 } // namespace internal 6473 } // namespace internal
6464 } // namespace v8 6474 } // namespace v8
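
Note on the helpers used above: this patch routes every young-generation GC decision through IsYoungGenerationCollector() and YoungGenerationCollector(), whose declarations live in src/heap/heap.h and are not part of this file's diff. A minimal sketch of how they might look, assuming a three-value GarbageCollector enum and a hypothetical FLAG_minor_mc runtime flag (neither is confirmed by this diff):

    // Sketch only; the real declarations are in src/heap/heap.h and are not
    // shown in this file's diff.
    enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };

    // True for any collector that only touches the young generation.
    static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
      return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
    }

    // Which young-generation collector to run. FLAG_minor_mc is an assumed
    // name for the switch between the Scavenger and the new collector.
    static inline GarbageCollector YoungGenerationCollector() {
      return FLAG_minor_mc ? MINOR_MARK_COMPACTOR : SCAVENGER;
    }

With Heap::MinorMarkCompact() still a stub (UNREACHABLE() in this patch set), the young-generation path effectively keeps selecting the Scavenger until the flag-guarded Minor Mark-Compact implementation lands.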