Chromium Code Reviews

Side by Side Diff: src/incremental-marking.cc

Issue 11028027: Revert trunk to bleeding_edge at r12484 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 2 months ago
OLD (left column) | NEW (right column)
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 26 matching lines...)
37 37
38 namespace v8 { 38 namespace v8 {
39 namespace internal { 39 namespace internal {
40 40
41 41
42 IncrementalMarking::IncrementalMarking(Heap* heap) 42 IncrementalMarking::IncrementalMarking(Heap* heap)
43 : heap_(heap), 43 : heap_(heap),
44 state_(STOPPED), 44 state_(STOPPED),
45 marking_deque_memory_(NULL), 45 marking_deque_memory_(NULL),
46 marking_deque_memory_committed_(false), 46 marking_deque_memory_committed_(false),
47 marker_(this, heap->mark_compact_collector()),
47 steps_count_(0), 48 steps_count_(0),
48 steps_took_(0), 49 steps_took_(0),
49 longest_step_(0.0), 50 longest_step_(0.0),
50 old_generation_space_available_at_start_of_incremental_(0), 51 old_generation_space_available_at_start_of_incremental_(0),
51 old_generation_space_used_at_start_of_incremental_(0), 52 old_generation_space_used_at_start_of_incremental_(0),
52 steps_count_since_last_gc_(0), 53 steps_count_since_last_gc_(0),
53 steps_took_since_last_gc_(0), 54 steps_took_since_last_gc_(0),
54 should_hurry_(false), 55 should_hurry_(false),
55 marking_speed_(0), 56 allocation_marking_factor_(0),
56 allocated_(0), 57 allocated_(0),
57 no_marking_scope_depth_(0) { 58 no_marking_scope_depth_(0) {
58 } 59 }
59 60
60 61
61 void IncrementalMarking::TearDown() { 62 void IncrementalMarking::TearDown() {
62 delete marking_deque_memory_; 63 delete marking_deque_memory_;
63 } 64 }
64 65
65 66
66 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, 67 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
67 Object** slot, 68 Object** slot,
68 Object* value) { 69 Object* value) {
69 if (BaseRecordWrite(obj, slot, value) && slot != NULL) { 70 if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
70 MarkBit obj_bit = Marking::MarkBitFrom(obj); 71 MarkBit obj_bit = Marking::MarkBitFrom(obj);
71 if (Marking::IsBlack(obj_bit)) { 72 if (Marking::IsBlack(obj_bit)) {
72 // Object is not going to be rescanned, so we need to record the slot. 73 // Object is not going to be rescanned, so we need to record the slot.
73 heap_->mark_compact_collector()->RecordSlot( 74 heap_->mark_compact_collector()->RecordSlot(
74 HeapObject::RawField(obj, 0), slot, value); 75 HeapObject::RawField(obj, 0), slot, value);
75 } 76 }
76 } 77 }
77 } 78 }
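
For orientation: RecordWriteSlow only records the slot when the source object is already black, because black objects are never rescanned, whereas a grey object is still on the marking deque and will be revisited anyway. Below is a minimal standalone sketch of that invariant, assuming simplified types that are not V8's real classes (BaseRecordWrite's check that the value is white is folded into a single condition here).

// Simplified tri-color write barrier, illustration only (not V8's types).
#include <cstdio>
#include <vector>

enum Color { WHITE, GREY, BLACK };

struct Obj {
  Color color = WHITE;
  Obj* field = nullptr;  // one pointer field, for the example
};

struct SlotRecord { Obj* source; Obj** slot; };
std::vector<SlotRecord> recorded_slots;  // stands in for RecordSlot()

// Called after the mutator has executed "*slot = value" on a marked object.
void RecordWriteSlow(Obj* source, Obj** slot, Obj* value) {
  if (value == nullptr || value->color != WHITE) return;  // nothing to protect
  if (source->color == BLACK) {
    // Black objects are never rescanned, so the new pointer has to be
    // remembered explicitly or it would be missed.
    recorded_slots.push_back({source, slot});
  }
  // A grey source is still on the marking deque and will be rescanned,
  // so no extra bookkeeping is needed.
}

int main() {
  Obj black_obj, white_obj;
  black_obj.color = BLACK;
  black_obj.field = &white_obj;  // mutator stores a pointer into a black object
  RecordWriteSlow(&black_obj, &black_obj.field, black_obj.field);
  std::printf("recorded slots: %zu\n", recorded_slots.size());  // prints 1
}
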
78 79
79 80
80 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, 81 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
81 Object* value, 82 Object* value,
82 Isolate* isolate) { 83 Isolate* isolate) {
83 ASSERT(obj->IsHeapObject()); 84 ASSERT(obj->IsHeapObject());
85
86 // Fast cases should already be covered by RecordWriteStub.
87 ASSERT(value->IsHeapObject());
88 ASSERT(!value->IsHeapNumber());
89 ASSERT(!value->IsString() ||
90 value->IsConsString() ||
91 value->IsSlicedString());
92 ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
93
84 IncrementalMarking* marking = isolate->heap()->incremental_marking(); 94 IncrementalMarking* marking = isolate->heap()->incremental_marking();
85 ASSERT(!marking->is_compacting_); 95 ASSERT(!marking->is_compacting_);
86
87 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
88 int counter = chunk->write_barrier_counter();
89 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
90 marking->write_barriers_invoked_since_last_step_ +=
91 MemoryChunk::kWriteBarrierCounterGranularity -
92 chunk->write_barrier_counter();
93 chunk->set_write_barrier_counter(
94 MemoryChunk::kWriteBarrierCounterGranularity);
95 }
96
97 marking->RecordWrite(obj, NULL, value); 96 marking->RecordWrite(obj, NULL, value);
98 } 97 }
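
The MemoryChunk::write_barrier_counter code on the old (left) side, which this revert removes, batches write-barrier bookkeeping per chunk: the generated stub decrements a per-chunk counter on its fast path, and the slow path only folds the consumed amount into a global count once the counter drops below half the granularity. A hedged sketch of that idea with an assumed granularity value (not V8's real constant):

// Illustration of a per-chunk write-barrier counter; invented sizes.
#include <cstdio>

const int kGranularity = 2048;  // assumed value, for illustration only

struct Chunk {
  int write_barrier_counter = kGranularity;
};

long write_barriers_invoked_since_last_step = 0;

// Fast path (in V8 this lives in the generated RecordWriteStub):
// every barrier hit just decrements the chunk-local counter.
void FastPathBarrier(Chunk* chunk) { chunk->write_barrier_counter--; }

// Slow path: only when the counter has dropped below half the granularity
// is the consumed amount folded into the global count and the counter reset.
void SlowPathBarrier(Chunk* chunk) {
  if (chunk->write_barrier_counter < kGranularity / 2) {
    write_barriers_invoked_since_last_step +=
        kGranularity - chunk->write_barrier_counter;
    chunk->write_barrier_counter = kGranularity;
  }
}

int main() {
  Chunk chunk;
  for (int i = 0; i < 1500; i++) FastPathBarrier(&chunk);
  SlowPathBarrier(&chunk);
  std::printf("counted: %ld\n", write_barriers_invoked_since_last_step);  // 1500
}

In the old Step() further down, the accumulated count both helps trigger a marking step and feeds the per-step work budget.
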
99 98
100 99
101 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, 100 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
102 Object** slot, 101 Object** slot,
103 Isolate* isolate) { 102 Isolate* isolate) {
104 ASSERT(obj->IsHeapObject());
105 IncrementalMarking* marking = isolate->heap()->incremental_marking(); 103 IncrementalMarking* marking = isolate->heap()->incremental_marking();
106 ASSERT(marking->is_compacting_); 104 ASSERT(marking->is_compacting_);
107
108 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
109 int counter = chunk->write_barrier_counter();
110 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
111 marking->write_barriers_invoked_since_last_step_ +=
112 MemoryChunk::kWriteBarrierCounterGranularity -
113 chunk->write_barrier_counter();
114 chunk->set_write_barrier_counter(
115 MemoryChunk::kWriteBarrierCounterGranularity);
116 }
117
118 marking->RecordWrite(obj, slot, *slot); 105 marking->RecordWrite(obj, slot, *slot);
119 } 106 }
120 107
121 108
122 void IncrementalMarking::RecordCodeTargetPatch(Code* host, 109 void IncrementalMarking::RecordCodeTargetPatch(Code* host,
123 Address pc, 110 Address pc,
124 HeapObject* value) { 111 HeapObject* value) {
125 if (IsMarking()) { 112 if (IsMarking()) {
126 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); 113 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
127 RecordWriteIntoCode(host, &rinfo, value); 114 RecordWriteIntoCode(host, &rinfo, value);
(...skipping 55 matching lines...)
183 170
184 table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); 171 table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
185 172
186 table_.Register(kVisitJSFunction, &VisitJSFunction); 173 table_.Register(kVisitJSFunction, &VisitJSFunction);
187 174
188 table_.Register(kVisitJSRegExp, &VisitJSRegExp); 175 table_.Register(kVisitJSRegExp, &VisitJSRegExp);
189 } 176 }
190 177
191 static void VisitJSWeakMap(Map* map, HeapObject* object) { 178 static void VisitJSWeakMap(Map* map, HeapObject* object) {
192 Heap* heap = map->GetHeap(); 179 Heap* heap = map->GetHeap();
193 Object** start_slot =
194 HeapObject::RawField(object, JSWeakMap::kPropertiesOffset);
195 VisitPointers(heap, 180 VisitPointers(heap,
196 start_slot, 181 HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
197 start_slot,
198 HeapObject::RawField(object, JSWeakMap::kSize)); 182 HeapObject::RawField(object, JSWeakMap::kSize));
199 } 183 }
200 184
201 static void VisitSharedFunctionInfo(Map* map, HeapObject* object) { 185 static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
202 Heap* heap = map->GetHeap(); 186 Heap* heap = map->GetHeap();
203 SharedFunctionInfo* shared = SharedFunctionInfo::cast(object); 187 SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
204 if (shared->ic_age() != heap->global_ic_age()) { 188 if (shared->ic_age() != heap->global_ic_age()) {
205 shared->ResetForNewContext(heap->global_ic_age()); 189 shared->ResetForNewContext(heap->global_ic_age());
206 } 190 }
207 FixedBodyVisitor<IncrementalMarkingMarkingVisitor, 191 FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
208 SharedFunctionInfo::BodyDescriptor, 192 SharedFunctionInfo::BodyDescriptor,
209 void>::Visit(map, object); 193 void>::Visit(map, object);
210 } 194 }
211 195
212 static const int kScanningChunk = 32 * 1024;
213
214 static int VisitHugeArray(FixedArray* array) {
215 Heap* heap = array->GetHeap();
216 MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
217 Object** start_slot = array->data_start();
218 int length = array->length();
219
220 if (chunk->owner()->identity() != LO_SPACE) {
221 VisitPointers(heap, start_slot, start_slot, start_slot + length);
222 return length;
223 }
224
225 int from =
226 chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
227 int to = Min(from + kScanningChunk, length);
228
229 VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
230
231 if (to == length) {
232 // If it went from black to grey while it was waiting for the next bit to
233 // be scanned then we have to start the scan again.
234 MarkBit mark_bit = Marking::MarkBitFrom(array);
235 if (!Marking::IsBlack(mark_bit)) {
236 ASSERT(Marking::IsGrey(mark_bit));
237 chunk->SetPartiallyScannedProgress(0);
238 } else {
239 chunk->SetCompletelyScanned();
240 }
241 } else {
242 chunk->SetPartiallyScannedProgress(to);
243 }
244 return to - from;
245 }
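
VisitHugeArray above exists only on the old (left) side and is removed by the revert. It caps how much of a large-object-space FixedArray is scanned per marking step and stores the resume point on the object's MemoryChunk, resetting it if the array was re-greyed in the meantime. A simplified, self-contained sketch of the resumable-scan idea, with invented names:

// Resumable scanning of a big array in fixed-size chunks, illustration only.
#include <algorithm>
#include <cstdio>
#include <vector>

const int kScanningChunk = 32 * 1024;

struct BigArray {
  std::vector<int> slots;
  int scan_progress = 0;  // stands in for the chunk's partial-scan progress
};

// Visit one chunk's worth of slots; returns how many were processed.
int VisitHugeArray(BigArray* array) {
  int length = static_cast<int>(array->slots.size());
  int from = array->scan_progress;
  int to = std::min(from + kScanningChunk, length);
  for (int i = from; i < to; i++) {
    (void)array->slots[i];  // the real visitor would mark/record this slot
  }
  array->scan_progress = (to == length) ? 0 : to;  // reset once finished
  return to - from;
}

int main() {
  BigArray array;
  array.slots.resize(100 * 1024);
  int total = 0, steps = 0;
  do {
    total += VisitHugeArray(&array);
    steps++;
  } while (array.scan_progress != 0);
  std::printf("%d slots in %d steps\n", total, steps);  // 102400 slots in 4 steps
}
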
246
247 static inline void VisitJSFunction(Map* map, HeapObject* object) { 196 static inline void VisitJSFunction(Map* map, HeapObject* object) {
248 Heap* heap = map->GetHeap(); 197 Heap* heap = map->GetHeap();
249 // Iterate over all fields in the body but take care in dealing with 198 // Iterate over all fields in the body but take care in dealing with
250 // the code entry and skip weak fields. 199 // the code entry and skip weak fields.
251 Object** start_slot =
252 HeapObject::RawField(object, JSFunction::kPropertiesOffset);
253 VisitPointers(heap, 200 VisitPointers(heap,
254 start_slot, 201 HeapObject::RawField(object, JSFunction::kPropertiesOffset),
255 start_slot,
256 HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); 202 HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
257 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); 203 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
258 VisitPointers(heap, 204 VisitPointers(heap,
259 start_slot,
260 HeapObject::RawField(object, 205 HeapObject::RawField(object,
261 JSFunction::kCodeEntryOffset + kPointerSize), 206 JSFunction::kCodeEntryOffset + kPointerSize),
262 HeapObject::RawField(object, 207 HeapObject::RawField(object,
263 JSFunction::kNonWeakFieldsEndOffset)); 208 JSFunction::kNonWeakFieldsEndOffset));
264 } 209 }
265 210
266 INLINE(static void VisitPointer(Heap* heap, Object** p)) { 211 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
267 Object* obj = *p; 212 Object* obj = *p;
268 if (obj->NonFailureIsHeapObject()) { 213 if (obj->NonFailureIsHeapObject()) {
269 heap->mark_compact_collector()->RecordSlot(p, p, obj); 214 heap->mark_compact_collector()->RecordSlot(p, p, obj);
270 MarkObject(heap, obj); 215 MarkObject(heap, obj);
271 } 216 }
272 } 217 }
273 218
274 INLINE(static void VisitPointers(Heap* heap, 219 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
275 Object** anchor,
276 Object** start,
277 Object** end)) {
278 for (Object** p = start; p < end; p++) { 220 for (Object** p = start; p < end; p++) {
279 Object* obj = *p; 221 Object* obj = *p;
280 if (obj->NonFailureIsHeapObject()) { 222 if (obj->NonFailureIsHeapObject()) {
281 heap->mark_compact_collector()->RecordSlot(anchor, p, obj); 223 heap->mark_compact_collector()->RecordSlot(start, p, obj);
282 MarkObject(heap, obj); 224 MarkObject(heap, obj);
283 } 225 }
284 } 226 }
285 } 227 }
286 228
287 // Marks the object grey and pushes it on the marking stack.
288 INLINE(static void MarkObject(Heap* heap, Object* obj)) { 229 INLINE(static void MarkObject(Heap* heap, Object* obj)) {
289 HeapObject* heap_object = HeapObject::cast(obj); 230 HeapObject* heap_object = HeapObject::cast(obj);
290 MarkBit mark_bit = Marking::MarkBitFrom(heap_object); 231 MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
291 if (mark_bit.data_only()) { 232 if (mark_bit.data_only()) {
292 if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) { 233 if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
293 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), 234 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
294 heap_object->Size()); 235 heap_object->Size());
295 } 236 }
296 } else if (Marking::IsWhite(mark_bit)) { 237 } else if (Marking::IsWhite(mark_bit)) {
297 heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit); 238 heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
298 } 239 }
299 } 240 }
300
301 // Marks the object black without pushing it on the marking stack.
302 // Returns true if object needed marking and false otherwise.
303 INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
304 HeapObject* heap_object = HeapObject::cast(obj);
305 MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
306 if (Marking::IsWhite(mark_bit)) {
307 mark_bit.Set();
308 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
309 heap_object->Size());
310 return true;
311 }
312 return false;
313 }
314 }; 241 };
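
As a reference for the visitor as a whole: MarkObject turns a white object grey and pushes it on the marking deque so its fields are scanned later, while the old side's MarkObjectWithoutPush (removed here) blackens an object directly when its fields need no separate scan. A minimal tri-color worklist sketch, not V8's MarkBit machinery; the Drain() helper is roughly what Hurry() does in one uninterrupted pass:

// Minimal tri-color marking worklist, for illustration only.
#include <cstdio>
#include <deque>
#include <vector>

enum Color { WHITE, GREY, BLACK };

struct Node {
  Color color = WHITE;
  std::vector<Node*> children;
};

std::deque<Node*> marking_deque;

// White -> grey, and remember the object so its children are visited later.
void MarkObject(Node* n) {
  if (n->color == WHITE) {
    n->color = GREY;
    marking_deque.push_back(n);
  }
}

// White -> black without pushing: used when the object's fields do not
// need to be scanned separately.
bool MarkObjectWithoutPush(Node* n) {
  if (n->color == WHITE) {
    n->color = BLACK;
    return true;
  }
  return false;
}

// Process everything on the deque until it is empty.
void Drain() {
  while (!marking_deque.empty()) {
    Node* n = marking_deque.front();
    marking_deque.pop_front();
    for (Node* child : n->children) MarkObject(child);
    n->color = BLACK;
  }
}

int main() {
  Node a, b, c;
  a.children = {&b, &c};
  MarkObject(&a);
  Drain();
  std::printf("%d %d %d\n", a.color, b.color, c.color);  // 2 2 2 (all BLACK)
}
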
315 242
316 243
317 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { 244 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
318 public: 245 public:
319 IncrementalMarkingRootMarkingVisitor(Heap* heap, 246 IncrementalMarkingRootMarkingVisitor(Heap* heap,
320 IncrementalMarking* incremental_marking) 247 IncrementalMarking* incremental_marking)
321 : heap_(heap), 248 : heap_(heap),
322 incremental_marking_(incremental_marking) { 249 incremental_marking_(incremental_marking) {
323 } 250 }
(...skipping 349 matching lines...)
673 } 600 }
674 } else if (obj->map() != filler_map) { 601 } else if (obj->map() != filler_map) {
675 // Skip one word filler objects that appear on the 602 // Skip one word filler objects that appear on the
676 // stack when we perform in place array shift. 603 // stack when we perform in place array shift.
677 array[new_top] = obj; 604 array[new_top] = obj;
678 new_top = ((new_top + 1) & mask); 605 new_top = ((new_top + 1) & mask);
679 ASSERT(new_top != marking_deque_.bottom()); 606 ASSERT(new_top != marking_deque_.bottom());
680 #ifdef DEBUG 607 #ifdef DEBUG
681 MarkBit mark_bit = Marking::MarkBitFrom(obj); 608 MarkBit mark_bit = Marking::MarkBitFrom(obj);
682 ASSERT(Marking::IsGrey(mark_bit) || 609 ASSERT(Marking::IsGrey(mark_bit) ||
683 (obj->IsFiller() && Marking::IsWhite(mark_bit)) || 610 (obj->IsFiller() && Marking::IsWhite(mark_bit)));
684 MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
685 #endif 611 #endif
686 } 612 }
687 } 613 }
688 marking_deque_.set_top(new_top); 614 marking_deque_.set_top(new_top);
689 615
690 steps_took_since_last_gc_ = 0; 616 steps_took_since_last_gc_ = 0;
691 steps_count_since_last_gc_ = 0; 617 steps_count_since_last_gc_ = 0;
692 longest_step_ = 0.0; 618 longest_step_ = 0.0;
693 } 619 }
694 620
695 621
696 void IncrementalMarking::Hurry() { 622 void IncrementalMarking::Hurry() {
697 if (state() == MARKING) { 623 if (state() == MARKING) {
698 double start = 0.0; 624 double start = 0.0;
699 if (FLAG_trace_incremental_marking) { 625 if (FLAG_trace_incremental_marking) {
700 PrintF("[IncrementalMarking] Hurry\n"); 626 PrintF("[IncrementalMarking] Hurry\n");
701 start = OS::TimeCurrentMillis(); 627 start = OS::TimeCurrentMillis();
702 } 628 }
703 // TODO(gc) hurry can mark objects it encounters black as mutator 629 // TODO(gc) hurry can mark objects it encounters black as mutator
704 // was stopped. 630 // was stopped.
705 Map* filler_map = heap_->one_pointer_filler_map(); 631 Map* filler_map = heap_->one_pointer_filler_map();
706 Map* native_context_map = heap_->native_context_map(); 632 Map* native_context_map = heap_->native_context_map();
707 do { 633 while (!marking_deque_.IsEmpty()) {
708 while (!marking_deque_.IsEmpty()) { 634 HeapObject* obj = marking_deque_.Pop();
709 HeapObject* obj = marking_deque_.Pop();
710 635
711 // Explicitly skip one word fillers. Incremental markbit patterns are 636 // Explicitly skip one word fillers. Incremental markbit patterns are
712 // correct only for objects that occupy at least two words. 637 // correct only for objects that occupy at least two words.
713 Map* map = obj->map(); 638 Map* map = obj->map();
714 if (map == filler_map) { 639 if (map == filler_map) {
715 continue; 640 continue;
716 } else if (map == native_context_map) { 641 } else if (map == native_context_map) {
717 // Native contexts have weak fields. 642 // Native contexts have weak fields.
718 IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj); 643 IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
719 ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj))); 644 } else if (map->instance_type() == MAP_TYPE) {
720 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); 645 Map* map = Map::cast(obj);
721 } else if (map->instance_type() == FIXED_ARRAY_TYPE && 646 heap_->ClearCacheOnMap(map);
722 FixedArray::cast(obj)->length() > 647
723 IncrementalMarkingMarkingVisitor::kScanningChunk) { 648 // When map collection is enabled we have to mark through map's
724 MarkBit map_mark_bit = Marking::MarkBitFrom(map); 649 // transitions and back pointers in a special way to make these links
725 if (Marking::IsWhite(map_mark_bit)) { 650 // weak. Only maps for subclasses of JSReceiver can have transitions.
726 WhiteToGreyAndPush(map, map_mark_bit); 651 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
727 } 652 if (FLAG_collect_maps &&
728 MarkBit mark_bit = Marking::MarkBitFrom(obj); 653 map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
729 if (!Marking::IsBlack(mark_bit)) { 654 marker_.MarkMapContents(map);
730 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); 655 } else {
731 } else { 656 IncrementalMarkingMarkingVisitor::VisitPointers(
732 ASSERT( 657 heap_,
733 MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned()); 658 HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
734 } 659 HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
735 IncrementalMarkingMarkingVisitor::VisitHugeArray(
736 FixedArray::cast(obj));
737 } else {
738 MarkBit map_mark_bit = Marking::MarkBitFrom(map);
739 if (Marking::IsWhite(map_mark_bit)) {
740 WhiteToGreyAndPush(map, map_mark_bit);
741 }
742 IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
743 ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
744 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
745 } 660 }
661 } else {
662 MarkBit map_mark_bit = Marking::MarkBitFrom(map);
663 if (Marking::IsWhite(map_mark_bit)) {
664 WhiteToGreyAndPush(map, map_mark_bit);
665 }
666 IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
667 }
746 668
747 MarkBit mark_bit = Marking::MarkBitFrom(obj); 669 MarkBit mark_bit = Marking::MarkBitFrom(obj);
748 Marking::MarkBlack(mark_bit); 670 ASSERT(!Marking::IsBlack(mark_bit));
749 } 671 Marking::MarkBlack(mark_bit);
750 state_ = COMPLETE; 672 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
751 if (FLAG_trace_incremental_marking) { 673 }
752 double end = OS::TimeCurrentMillis(); 674 state_ = COMPLETE;
753 PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", 675 if (FLAG_trace_incremental_marking) {
754 static_cast<int>(end - start)); 676 double end = OS::TimeCurrentMillis();
755 } 677 PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
756 MarkCompactCollector::ProcessLargePostponedArrays(heap_, &marking_deque_); 678 static_cast<int>(end - start));
757 } while (!marking_deque_.IsEmpty()); 679 }
758 } 680 }
759 681
760 if (FLAG_cleanup_code_caches_at_gc) { 682 if (FLAG_cleanup_code_caches_at_gc) {
761 PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache(); 683 PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
762 Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache)); 684 Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
763 MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(), 685 MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
764 PolymorphicCodeCache::kSize); 686 PolymorphicCodeCache::kSize);
765 } 687 }
766 688
767 Object* context = heap_->native_contexts_list(); 689 Object* context = heap_->native_contexts_list();
(...skipping 79 matching lines...)
847 CompletionAction action) { 769 CompletionAction action) {
848 if (heap_->gc_state() != Heap::NOT_IN_GC || 770 if (heap_->gc_state() != Heap::NOT_IN_GC ||
849 !FLAG_incremental_marking || 771 !FLAG_incremental_marking ||
850 !FLAG_incremental_marking_steps || 772 !FLAG_incremental_marking_steps ||
851 (state_ != SWEEPING && state_ != MARKING)) { 773 (state_ != SWEEPING && state_ != MARKING)) {
852 return; 774 return;
853 } 775 }
854 776
855 allocated_ += allocated_bytes; 777 allocated_ += allocated_bytes;
856 778
857 if (allocated_ < kAllocatedThreshold && 779 if (allocated_ < kAllocatedThreshold) return;
858 write_barriers_invoked_since_last_step_ <
859 kWriteBarriersInvokedThreshold) {
860 return;
861 }
862 780
863 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; 781 if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
864 782
865 // The marking speed is driven either by the allocation rate or by the rate 783 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
866 // at which we are having to check the color of objects in the write barrier.
867 // It is possible for a tight non-allocating loop to run a lot of write
868 // barriers before we get here and check them (marking can only take place on
869 // allocation), so to reduce the lumpiness we don't use the write barriers
870 // invoked since last step directly to determine the amount of work to do.
871 intptr_t bytes_to_process =
872 marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
873 allocated_ = 0;
874 write_barriers_invoked_since_last_step_ = 0;
875
876 bytes_scanned_ += bytes_to_process; 784 bytes_scanned_ += bytes_to_process;
877 785
878 double start = 0; 786 double start = 0;
879 787
880 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { 788 if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
881 start = OS::TimeCurrentMillis(); 789 start = OS::TimeCurrentMillis();
882 } 790 }
883 791
884 if (state_ == SWEEPING) { 792 if (state_ == SWEEPING) {
885 if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) { 793 if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
886 bytes_scanned_ = 0; 794 bytes_scanned_ = 0;
887 StartMarking(PREVENT_COMPACTION); 795 StartMarking(PREVENT_COMPACTION);
888 } 796 }
889 } else if (state_ == MARKING) { 797 } else if (state_ == MARKING) {
890 Map* filler_map = heap_->one_pointer_filler_map(); 798 Map* filler_map = heap_->one_pointer_filler_map();
891 Map* native_context_map = heap_->native_context_map(); 799 Map* native_context_map = heap_->native_context_map();
892 while (true) { 800 while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
893 while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { 801 HeapObject* obj = marking_deque_.Pop();
894 HeapObject* obj = marking_deque_.Pop();
895 802
896 // Explicitly skip one word fillers. Incremental markbit patterns are 803 // Explicitly skip one word fillers. Incremental markbit patterns are
897 // correct only for objects that occupy at least two words. 804 // correct only for objects that occupy at least two words.
898 Map* map = obj->map(); 805 Map* map = obj->map();
899 if (map == filler_map) continue; 806 if (map == filler_map) continue;
900 807
901 int size = obj->SizeFromMap(map); 808 int size = obj->SizeFromMap(map);
902 MarkBit map_mark_bit = Marking::MarkBitFrom(map); 809 bytes_to_process -= size;
903 if (Marking::IsWhite(map_mark_bit)) { 810 MarkBit map_mark_bit = Marking::MarkBitFrom(map);
904 WhiteToGreyAndPush(map, map_mark_bit); 811 if (Marking::IsWhite(map_mark_bit)) {
812 WhiteToGreyAndPush(map, map_mark_bit);
813 }
814
815 // TODO(gc) switch to static visitor instead of normal visitor.
816 if (map == native_context_map) {
817 // Native contexts have weak fields.
818 Context* ctx = Context::cast(obj);
819
820 // We will mark cache black with a separate pass
821 // when we finish marking.
822 MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
823
824 IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
825 } else if (map->instance_type() == MAP_TYPE) {
826 Map* map = Map::cast(obj);
827 heap_->ClearCacheOnMap(map);
828
829 // When map collection is enabled we have to mark through map's
830 // transitions and back pointers in a special way to make these links
831 // weak. Only maps for subclasses of JSReceiver can have transitions.
832 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
833 if (FLAG_collect_maps &&
834 map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
835 marker_.MarkMapContents(map);
836 } else {
837 IncrementalMarkingMarkingVisitor::VisitPointers(
838 heap_,
839 HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
840 HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
905 } 841 }
842 } else {
843 IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
844 }
906 845
907 // TODO(gc) switch to static visitor instead of normal visitor. 846 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
908 if (map == native_context_map) { 847 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
909 // Native contexts have weak fields. 848 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
910 Context* ctx = Context::cast(obj); 849 Marking::MarkBlack(obj_mark_bit);
850 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
851 }
852 if (marking_deque_.IsEmpty()) MarkingComplete(action);
853 }
911 854
912 // We will mark cache black with a separate pass 855 allocated_ = 0;
913 // when we finish marking.
914 MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
915
916 IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
917 bytes_to_process -= size;
918 SLOW_ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
919 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
920 } else if (map->instance_type() == FIXED_ARRAY_TYPE &&
921 FixedArray::cast(obj)->length() >
922 IncrementalMarkingMarkingVisitor::kScanningChunk) {
923 SLOW_ASSERT(
924 Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
925 MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
926 bytes_to_process -=
927 IncrementalMarkingMarkingVisitor::VisitHugeArray(
928 FixedArray::cast(obj));
929 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
930 if (!Marking::IsBlack(obj_mark_bit)) {
931 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
932 }
933 } else {
934 IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
935 bytes_to_process -= size;
936 SLOW_ASSERT(
937 Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
938 (obj->IsFiller() && Marking::IsWhite(Marking::MarkBitFrom(obj))));
939 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
940 }
941
942 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
943 Marking::MarkBlack(obj_mark_bit);
944 }
945 if (marking_deque_.IsEmpty()) {
946 MarkCompactCollector::ProcessLargePostponedArrays(heap_,
947 &marking_deque_);
948 if (marking_deque_.IsEmpty()) {
949 MarkingComplete(action);
950 break;
951 }
952 } else {
953 ASSERT(bytes_to_process <= 0);
954 break;
955 }
956 }
957 }
958 856
959 steps_count_++; 857 steps_count_++;
960 steps_count_since_last_gc_++; 858 steps_count_since_last_gc_++;
961 859
962 bool speed_up = false; 860 bool speed_up = false;
963 861
964 if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { 862 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
965 if (FLAG_trace_gc) { 863 if (FLAG_trace_gc) {
966 PrintPID("Speed up marking after %d steps\n", 864 PrintPID("Speed up marking after %d steps\n",
967 static_cast<int>(kMarkingSpeedAccellerationInterval)); 865 static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
968 } 866 }
969 speed_up = true; 867 speed_up = true;
970 } 868 }
971 869
972 bool space_left_is_very_small = 870 bool space_left_is_very_small =
973 (old_generation_space_available_at_start_of_incremental_ < 10 * MB); 871 (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
974 872
975 bool only_1_nth_of_space_that_was_available_still_left = 873 bool only_1_nth_of_space_that_was_available_still_left =
976 (SpaceLeftInOldSpace() * (marking_speed_ + 1) < 874 (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
977 old_generation_space_available_at_start_of_incremental_); 875 old_generation_space_available_at_start_of_incremental_);
978 876
979 if (space_left_is_very_small || 877 if (space_left_is_very_small ||
980 only_1_nth_of_space_that_was_available_still_left) { 878 only_1_nth_of_space_that_was_available_still_left) {
981 if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n"); 879 if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
982 speed_up = true; 880 speed_up = true;
983 } 881 }
984 882
985 bool size_of_old_space_multiplied_by_n_during_marking = 883 bool size_of_old_space_multiplied_by_n_during_marking =
986 (heap_->PromotedTotalSize() > 884 (heap_->PromotedTotalSize() >
987 (marking_speed_ + 1) * 885 (allocation_marking_factor_ + 1) *
988 old_generation_space_used_at_start_of_incremental_); 886 old_generation_space_used_at_start_of_incremental_);
989 if (size_of_old_space_multiplied_by_n_during_marking) { 887 if (size_of_old_space_multiplied_by_n_during_marking) {
990 speed_up = true; 888 speed_up = true;
991 if (FLAG_trace_gc) { 889 if (FLAG_trace_gc) {
992 PrintPID("Speed up marking because of heap size increase\n"); 890 PrintPID("Speed up marking because of heap size increase\n");
993 } 891 }
994 } 892 }
995 893
996 int64_t promoted_during_marking = heap_->PromotedTotalSize() 894 int64_t promoted_during_marking = heap_->PromotedTotalSize()
997 - old_generation_space_used_at_start_of_incremental_; 895 - old_generation_space_used_at_start_of_incremental_;
998 intptr_t delay = marking_speed_ * MB; 896 intptr_t delay = allocation_marking_factor_ * MB;
999 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); 897 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
1000 898
1001 // We try to scan at at least twice the speed that we are allocating. 899 // We try to scan at at least twice the speed that we are allocating.
1002 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { 900 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
1003 if (FLAG_trace_gc) { 901 if (FLAG_trace_gc) {
1004 PrintPID("Speed up marking because marker was not keeping up\n"); 902 PrintPID("Speed up marking because marker was not keeping up\n");
1005 } 903 }
1006 speed_up = true; 904 speed_up = true;
1007 } 905 }
1008 906
1009 if (speed_up) { 907 if (speed_up) {
1010 if (state_ != MARKING) { 908 if (state_ != MARKING) {
1011 if (FLAG_trace_gc) { 909 if (FLAG_trace_gc) {
1012 PrintPID("Postponing speeding up marking until marking starts\n"); 910 PrintPID("Postponing speeding up marking until marking starts\n");
1013 } 911 }
1014 } else { 912 } else {
1015 marking_speed_ += kMarkingSpeedAccellerationInterval; 913 allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
1016 marking_speed_ = static_cast<int>( 914 allocation_marking_factor_ = static_cast<int>(
1017 Min(kMaxMarkingSpeed, 915 Min(kMaxAllocationMarkingFactor,
1018 static_cast<intptr_t>(marking_speed_ * 1.3))); 916 static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
1019 if (FLAG_trace_gc) { 917 if (FLAG_trace_gc) {
1020 PrintPID("Marking speed increased to %d\n", marking_speed_); 918 PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
1021 } 919 }
1022 } 920 }
1023 } 921 }
1024 922
1025 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { 923 if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
1026 double end = OS::TimeCurrentMillis(); 924 double end = OS::TimeCurrentMillis();
1027 double delta = (end - start); 925 double delta = (end - start);
1028 longest_step_ = Max(longest_step_, delta); 926 longest_step_ = Max(longest_step_, delta);
1029 steps_took_ += delta; 927 steps_took_ += delta;
1030 steps_took_since_last_gc_ += delta; 928 steps_took_since_last_gc_ += delta;
1031 } 929 }
1032 } 930 }
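
The two sides of Step() size the per-step marking budget differently: the old side uses marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold), so heavy write-barrier traffic still buys marking work even without much allocation, while the reverted side uses allocated_ * allocation_marking_factor_. A rough arithmetic comparison, with an assumed threshold value:

// Comparing the two step-budget formulas; the threshold is an assumption.
#include <algorithm>
#include <cstdint>
#include <cstdio>

const intptr_t kWriteBarriersInvokedThreshold = 32 * 1024;  // assumed value

intptr_t OldBudget(intptr_t allocated, int marking_speed) {
  // Old (left) side: write-barrier-heavy phases still get a work floor.
  return marking_speed * std::max(allocated, kWriteBarriersInvokedThreshold);
}

intptr_t RevertedBudget(intptr_t allocated, int allocation_marking_factor) {
  // Right side after the revert: the budget tracks allocation only.
  return allocated * static_cast<intptr_t>(allocation_marking_factor);
}

int main() {
  intptr_t allocated = 8 * 1024;  // a small allocation burst
  std::printf("old budget: %ld, reverted budget: %ld\n",
              static_cast<long>(OldBudget(allocated, 8)),
              static_cast<long>(RevertedBudget(allocated, 8)));
  // old budget: 262144, reverted budget: 65536
}
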
1033 931
1034 932
1035 void IncrementalMarking::ResetStepCounters() { 933 void IncrementalMarking::ResetStepCounters() {
1036 steps_count_ = 0; 934 steps_count_ = 0;
1037 steps_took_ = 0; 935 steps_took_ = 0;
1038 longest_step_ = 0.0; 936 longest_step_ = 0.0;
1039 old_generation_space_available_at_start_of_incremental_ = 937 old_generation_space_available_at_start_of_incremental_ =
1040 SpaceLeftInOldSpace(); 938 SpaceLeftInOldSpace();
1041 old_generation_space_used_at_start_of_incremental_ = 939 old_generation_space_used_at_start_of_incremental_ =
1042 heap_->PromotedTotalSize(); 940 heap_->PromotedTotalSize();
1043 steps_count_since_last_gc_ = 0; 941 steps_count_since_last_gc_ = 0;
1044 steps_took_since_last_gc_ = 0; 942 steps_took_since_last_gc_ = 0;
1045 bytes_rescanned_ = 0; 943 bytes_rescanned_ = 0;
1046 marking_speed_ = kInitialMarkingSpeed; 944 allocation_marking_factor_ = kInitialAllocationMarkingFactor;
1047 bytes_scanned_ = 0; 945 bytes_scanned_ = 0;
1048 write_barriers_invoked_since_last_step_ = 0;
1049 } 946 }
1050 947
1051 948
1052 int64_t IncrementalMarking::SpaceLeftInOldSpace() { 949 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
1053 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); 950 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
1054 } 951 }
1055 952
1056 } } // namespace v8::internal 953 } } // namespace v8::internal
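
Both sides ramp their marking factor the same way when a speed-up triggers: an additive bump followed by a 1.3x multiplication, clamped to a maximum. A quick sketch of how the factor grows under repeated speed-ups, using assumed values for the bump and the cap rather than V8's real constants:

// Growth of the marking speed factor under repeated speed-ups; kSpeedup
// and kMax are assumptions, not V8's real constants.
#include <algorithm>
#include <cstdint>
#include <cstdio>

const int kSpeedup = 2;      // assumed additive bump
const intptr_t kMax = 1000;  // assumed cap

int SpeedUp(int factor) {
  factor += kSpeedup;  // additive bump, then multiplicative growth with a cap
  return static_cast<int>(
      std::min(kMax, static_cast<intptr_t>(factor * 1.3)));
}

int main() {
  int factor = 1;  // starts small, like kInitialAllocationMarkingFactor
  for (int step = 0; step < 12; step++) {
    std::printf("after %2d speed-ups: factor = %d\n", step, factor);
    factor = SpeedUp(factor);
  }
  // The factor grows roughly geometrically until it saturates at kMax.
}
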
