Chromium Code Reviews

Unified diff: src/heap/incremental-marking.cc

Issue 1420423009: [heap] Black allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/incremental-marking.h"

 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/conversions.h"
 #include "src/heap/gc-idle-time-handler.h"
(...skipping 24 matching lines...)
       bytes_rescanned_(0),
       should_hurry_(false),
       marking_speed_(0),
       bytes_scanned_(0),
       allocated_(0),
       write_barriers_invoked_since_last_step_(0),
       idle_marking_delay_counter_(0),
       no_marking_scope_depth_(0),
       unscanned_bytes_of_large_object_(0),
       was_activated_(false),
+      black_allocation_(false),
       finalize_marking_completed_(false),
       incremental_marking_finalization_rounds_(0),
       request_type_(COMPLETE_MARKING) {}

 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
   MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
   DCHECK(!Marking::IsImpossible(value_bit));

   MarkBit obj_bit = Marking::MarkBitFrom(obj);
(...skipping 259 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
     if (Marking::IsWhite(mark_bit)) {
       Marking::MarkBlack(mark_bit);
       MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
       return true;
     }
     return false;
   }
 };

+void IncrementalMarking::IterateBlackObject(HeapObject* object) {
+  if (black_allocation() &&
+      Page::FromAddress(object->address())->IsFlagSet(Page::BLACK_PAGE)) {
+    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
+  }
+}

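The new IterateBlackObject() only re-visits an object's body when the object lives on a page flagged BLACK_PAGE while black allocation is active. For orientation, a minimal sketch (not part of the patch) of the rule this implies, written as it would read inside this file where the heap types are in scope; Marking::IsBlack is assumed to exist alongside the IsWhite/MarkBlack helpers used above:

// Sketch only: with black allocation, "is this object black?" has two sources
// of truth - the per-object mark bit, and the BLACK_PAGE flag of the page the
// object was allocated on.
bool ObjectIsBlack(HeapObject* object) {
  if (Page::FromAddress(object->address())->IsFlagSet(Page::BLACK_PAGE)) {
    return true;  // allocated black: the page flag stands in for the mark bit
  }
  return Marking::IsBlack(Marking::MarkBitFrom(object));  // assumed helper
}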
 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
  public:
   explicit IncrementalMarkingRootMarkingVisitor(
       IncrementalMarking* incremental_marking)
       : heap_(incremental_marking->heap()) {}

   void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

   void VisitPointers(Object** start, Object** end) override {
(...skipping 254 matching lines...)
   if (FLAG_cleanup_code_caches_at_gc) {
     // We will mark cache black with a separate pass
     // when we finish marking.
     MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
   }

   // Mark strong roots grey.
   IncrementalMarkingRootMarkingVisitor visitor(this);
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

+  if (FLAG_black_allocation) {
+    StartBlackAllocation();
+  }
+
   // Ready to start incremental marking.
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Running\n");
   }
 }

+void IncrementalMarking::StartBlackAllocation() {
+  if (heap_->isolate()->serializer_enabled()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Black allocation delayed - serializer\n");
+    }
+    return;
+  }
+  DCHECK(FLAG_black_allocation);
+  DCHECK(IsMarking());
+  black_allocation_ = true;
+  PagedSpaces spaces(heap());
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->EmptyAllocationInfo();
+    space->free_list()->Reset();
+  }
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Black allocation started\n");
+  }
+}
+
+void IncrementalMarking::FinishBlackAllocation() {
+  black_allocation_ = false;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Black allocation finished\n");
+  }
+}

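StartBlackAllocation() closes the current linear allocation area and resets the free lists, so subsequent old-space allocation cannot reuse existing free memory and instead lands on fresh pages that can be treated as black. Together with the call sites visible in this file (StartMarking(), Stop(), and Step()), the toggle amounts to the following standalone sketch; SimplePage and SimpleHeap are simplified stand-ins, not V8's classes:

// Standalone sketch (not V8 code) of the black-allocation toggle: while it is
// on, every page handed out by the allocator is tagged, and objects placed on
// tagged pages count as live without touching individual mark bits.
#include <vector>

struct SimplePage { bool black_page = false; };

struct SimpleHeap {
  bool black_allocation = false;
  std::vector<SimplePage*> pages;

  SimplePage* NewAllocationPage() {
    SimplePage* p = new SimplePage();
    p->black_page = black_allocation;  // new pages inherit the current mode
    pages.push_back(p);
    return p;
  }

  void StartBlackAllocation() { black_allocation = true; }    // cf. StartMarking()/Step()
  void FinishBlackAllocation() { black_allocation = false; }  // cf. Stop()
};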
 void IncrementalMarking::MarkRoots() {
   DCHECK(!finalize_marking_completed_);
   DCHECK(IsMarking());

   IncrementalMarkingRootMarkingVisitor visitor(this);
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
 }


(...skipping 182 matching lines...)
     // Only pointers to from space have to be updated.
     if (heap_->InFromSpace(obj)) {
       MapWord map_word = obj->map_word();
       // There may be objects on the marking deque that do not exist anymore,
       // e.g. left trimmed objects or objects from the root set (frames).
       // If these object are dead at scavenging time, their marking deque
       // entries will not point to forwarding addresses. Hence, we can discard
       // them.
       if (map_word.IsForwardingAddress()) {
         HeapObject* dest = map_word.ToForwardingAddress();
+        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
+          continue;
         array[new_top] = dest;
         new_top = ((new_top + 1) & mask);
         DCHECK(new_top != marking_deque->bottom());
 #ifdef DEBUG
         MarkBit mark_bit = Marking::MarkBitFrom(obj);
         DCHECK(Marking::IsGrey(mark_bit) ||
                (obj->IsFiller() && Marking::IsWhite(mark_bit)));
 #endif
       }
     } else if (obj->map() != filler_map) {
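The two added lines filter out forwarded objects whose new location is on a black page: such objects are already treated as black via the page flag, so they are not kept on the marking deque. A hypothetical one-line helper (not in the patch) expressing the same check:

// Hypothetical helper, equivalent to the inline BLACK_PAGE check added above.
bool ShouldStayOnMarkingDeque(HeapObject* dest) {
  return !Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE);
}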
(...skipping 79 matching lines...)
     // correct only for objects that occupy at least two words.
     Map* map = obj->map();
     if (map == filler_map) continue;

     VisitObject(map, obj, obj->SizeFromMap(map));
   }
 }


 void IncrementalMarking::Hurry() {
-  if (state() == MARKING) {
+  // A scavenge may have pushed new objects on the marking deque (due to black
+  // allocation) even in COMPLETE state. This may happen if scavenges are
+  // forced e.g. in tests. It should not happen when COMPLETE was set when
+  // incremental marking finished and a regular GC was triggered after that
+  // because should_hurry_ will force a full GC.
+  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
     double start = 0.0;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
       start = heap_->MonotonicallyIncreasingTimeInMs();
       if (FLAG_trace_incremental_marking) {
         PrintF("[IncrementalMarking] Hurry\n");
       }
     }
     // TODO(gc) hurry can mark objects it encounters black as mutator
     // was stopped.
     ProcessMarkingDeque();
(...skipping 44 matching lines...)
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   if (IsMarking()) {
     PatchIncrementalMarkingRecordWriteStubs(heap_,
                                             RecordWriteStub::STORE_BUFFER_ONLY);
     DeactivateIncrementalWriteBarrier();
   }
   heap_->isolate()->stack_guard()->ClearGC();
   state_ = STOPPED;
   is_compacting_ = false;
+  FinishBlackAllocation();
 }


 void IncrementalMarking::Finalize() {
   Hurry();
   Stop();
 }


 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
(...skipping 199 matching lines...)
       if (heap_->mark_compact_collector()->sweeping_in_progress() &&
           (heap_->mark_compact_collector()->IsSweepingCompleted() ||
            !FLAG_concurrent_sweeping)) {
         heap_->mark_compact_collector()->EnsureSweepingCompleted();
       }
       if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
         bytes_scanned_ = 0;
         StartMarking();
       }
     } else if (state_ == MARKING) {
+      if (FLAG_black_allocation && !black_allocation()) {
+        // If black allocation was not started when incremental marking started
+        // start it now.
+        StartBlackAllocation();
+      }
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
       if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
         if (completion == FORCE_COMPLETION ||
             IsIdleMarkingDelayCounterLimitReached()) {
           if (!finalize_marking_completed_) {
             FinalizeMarking(action);
           } else {
             MarkingComplete(action);
           }
         } else {
(...skipping 45 matching lines...)
 void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
   idle_marking_delay_counter_++;
 }


 void IncrementalMarking::ClearIdleMarkingDelayCounter() {
   idle_marking_delay_counter_ = 0;
 }
 }  // namespace internal
 }  // namespace v8