Chromium Code Reviews

Unified Diff: src/incremental-marking.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 5 months ago
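For orientation before the diff: the change extends V8's incremental write barrier so that, while the collector is compacting, the barrier records the address of the written slot (not just the written value), letting the collector rewrite that slot after its target object is evacuated. A minimal sketch of the idea, assuming simplified stand-in types and a hypothetical recorded_slots_ buffer, not the real V8 API:

// Minimal sketch, not V8 code: stand-in types, hypothetical slot buffer.
#include <cstddef>
#include <vector>

struct HeapObject {};
typedef HeapObject Object;  // V8's Object/HeapObject tagging is elided.

class IncrementalMarkingSketch {
 public:
  explicit IncrementalMarkingSketch(bool compacting)
      : is_compacting_(compacting) {}

  // The barrier now receives the slot's address, not just the new value, so
  // a compacting collector can revisit the slot after its target has moved.
  // (obj would feed remembered-set bookkeeping in the real thing; unused here.)
  void RecordWrite(HeapObject* obj, Object** slot, Object* value) {
    MarkGreyIfWhite(value);                // incremental-marking half
    if (is_compacting_ && slot != NULL) {  // compaction half: remember slot
      recorded_slots_.push_back(slot);
    }
  }

 private:
  void MarkGreyIfWhite(Object* value) {
    // Would push a white object onto the marking deque; elided here.
  }

  bool is_compacting_;
  std::vector<Object**> recorded_slots_;  // rewritten after evacuation
};

This matches the split visible in the diff below: RecordWriteFromCode asserts !is_compacting_ and passes a NULL slot, while the new RecordWriteForEvacuationFromCode asserts is_compacting_ and passes the real slot.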
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 39 matching lines...)


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
-  isolate->heap()->incremental_marking()->RecordWrite(obj, value);
+  ASSERT(obj->IsHeapObject());
+
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
+  marking->RecordWrite(obj, NULL, value);
}


+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                                          Object** slot,
+                                                          Isolate* isolate) {
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(marking->is_compacting_);
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingMarkingVisitor(Heap* heap,
                                   IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
+    MarkObjectByPointer(p, p);
  }

  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(start, p);
  }

 private:
  // Mark object pointed to by p.
-  INLINE(void MarkObjectByPointer(Object** p)) {
+  INLINE(void MarkObjectByPointer(Object** anchor, Object** p)) {
    Object* obj = *p;
    // Since we can be sure that the object is not tagged as a failure we can
    // inline a slightly more efficient tag check here than IsHeapObject() would
    // produce.
    if (obj->NonFailureIsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(obj);
+
+      heap_->mark_compact_collector()->RecordSlot(anchor, p, obj);
      MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
      if (mark_bit.data_only()) {
        incremental_marking_->MarkBlackOrKeepGrey(mark_bit);
      } else if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
(...skipping 173 matching lines...)

bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

-  // TODO(gc) ISOLATES MERGE
  return FLAG_incremental_marking &&
      heap_->PromotedSpaceSize() > kActivationThreshold;
}


-static void PatchIncrementalMarkingRecordWriteStubs(bool enable) {
-  NumberDictionary* stubs = HEAP->code_stubs();
+static void PatchIncrementalMarkingRecordWriteStubs(Heap* heap,
+                                                    bool incremental,
+                                                    bool compacting) {
+  NumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
-          RecordWriteStub::Patch(Code::cast(e), enable);
+          RecordWriteStub::Patch(Code::cast(e), incremental, compacting);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
(...skipping 26 matching lines...)

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

+  is_compacting_ = !FLAG_never_compact &&
+      heap_->mark_compact_collector()->StartCompaction();
+
  state_ = MARKING;

-  PatchIncrementalMarkingRecordWriteStubs(true);
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          true,
Erik Corry 2011/07/04 11:04:11 This constant deserves a comment or an enum type.
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28 Done.
+                                          is_compacting_);
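Following up on the review comments about the bare true/false arguments: a named mode makes call sites like the one above self-documenting. A hedged sketch of the kind of enum the reviewer is asking for; the names here are illustrative, not what the patch ultimately uses:

// Illustrative only: replacing the (bool incremental, bool compacting) pair
// with a single enum, as the review suggests. Names are hypothetical.
enum RecordWriteStubMode {
  STORE_BUFFER_ONLY,      // no incremental marking, no compaction
  INCREMENTAL,            // incremental marking only
  INCREMENTAL_COMPACTION  // incremental marking plus slot recording
};

// A call site then reads:
//   PatchIncrementalMarkingRecordWriteStubs(
//       heap_, is_compacting_ ? INCREMENTAL_COMPACTION : INCREMENTAL);
// instead of:
//   PatchIncrementalMarkingRecordWriteStubs(heap_, true, is_compacting_);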

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  int size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  // Clear markbits.
(...skipping 30 matching lines...)
  intptr_t current = marking_deque_.bottom();
  intptr_t mask = marking_deque_.mask();
  intptr_t limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  intptr_t new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
+    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
        ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
      }
(...skipping 47 matching lines...)

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
-    PatchIncrementalMarkingRecordWriteStubs(false);
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            false,
Erik Corry 2011/07/04 11:04:11 These constants deserve comments or enum types.
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28 Done.
+                                            false);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
+  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
+  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
-  PatchIncrementalMarkingRecordWriteStubs(false);
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          false,
Erik Corry 2011/07/04 11:04:11 & here
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28 Done.
+                                          false);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete() {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
-  // TODO(gc) ISOLATES
-  ISOLATE->stack_guard()->RequestGC();
+  heap_->isolate()->stack_guard()->RequestGC();
}


void IncrementalMarking::Step(intptr_t allocated_bytes) {
  if (heap_->gc_state() != Heap::NOT_IN_GC) return;
  if (!FLAG_incremental_marking) return;
  if (!FLAG_incremental_marking_steps) return;

  allocated_ += allocated_bytes;

(...skipping 54 matching lines...)
  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}


} }  // namespace v8::internal