Chromium Code Reviews

Unified Diff: src/incremental-marking.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: fix presubmit, remove last debug check (created 9 years, 5 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 39 matching lines...)


 void IncrementalMarking::TearDown() {
   delete marking_deque_memory_;
 }


 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                              Object* value,
                                              Isolate* isolate) {
-  isolate->heap()->incremental_marking()->RecordWrite(obj, value);
+  ASSERT(obj->IsHeapObject());
+
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
+  marking->RecordWrite(obj, NULL, value);
 }


+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                                          Object** slot,
+                                                          Isolate* isolate) {
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(marking->is_compacting_);
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
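A note on the split above: the plain entry point now asserts that compaction is not in progress, while the new evacuation entry point threads the slot address through the barrier. Below is a standalone model (not this CL's code; the real three-argument RecordWrite lives in incremental-marking-inl.h) of why the compaction barrier needs the slot's address rather than just its value:

// Model code only (not this CL): why evacuation support threads the slot
// address through the barrier while the plain barrier only sees the value.
#include <cstdio>
#include <vector>

struct Obj;

static bool is_compacting = false;
static std::vector<Obj**> slot_buffer;   // stands in for V8's slots buffer

// Plain incremental barrier: the value is enough to keep marking correct.
void RecordWrite(Obj* host, Obj* value) {
  (void)host;
  (void)value;  // marking work (grey/black transitions) would happen here
}

// Evacuation barrier: additionally remember *where* the pointer is stored,
// so the collector can rewrite it after moving the target object.
void RecordWriteForEvacuation(Obj* host, Obj** slot) {
  RecordWrite(host, *slot);
  if (is_compacting) slot_buffer.push_back(slot);
}

int main() {
  Obj* field = nullptr;  // imagine: host->field
  is_compacting = true;
  RecordWriteForEvacuation(nullptr, &field);
  std::printf("%zu slot(s) recorded\n", slot_buffer.size());
}

The design choice mirrors the two stub modes introduced later in this file: code patched for plain incremental marking calls the first entry point, code patched for incremental compaction calls the second.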
 class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
  public:
   IncrementalMarkingMarkingVisitor(Heap* heap,
                                    IncrementalMarking* incremental_marking)
       : heap_(heap),
         incremental_marking_(incremental_marking) {
   }

   void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
+    MarkObjectByPointer(p, p);
   }

   void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(start, p);
   }

  private:
   // Mark object pointed to by p.
-  INLINE(void MarkObjectByPointer(Object** p)) {
+  INLINE(void MarkObjectByPointer(Object** anchor, Object** p)) {
     Object* obj = *p;
     // Since we can be sure that the object is not tagged as a failure we can
     // inline a slightly more efficient tag check here than IsHeapObject()
     // would produce.
     if (obj->NonFailureIsHeapObject()) {
       HeapObject* heap_object = HeapObject::cast(obj);
+
+      heap_->mark_compact_collector()->RecordSlot(anchor, p, obj);
       MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
       if (mark_bit.data_only()) {
         incremental_marking_->MarkBlackOrKeepGrey(mark_bit);
       } else if (Marking::IsWhite(mark_bit)) {
         incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
       }
     }
   }

   Heap* heap_;
(...skipping 173 matching lines...)
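The anchor parameter is worth a remark: VisitPointers passes start for every slot in the range, so all slots scanned from one region share a single representative holder address, and VisitPointer degenerates to passing the slot as its own anchor. The model below assumes RecordSlot records only slots whose target sits on an evacuation-candidate page; that policy is an assumption for illustration, since RecordSlot's body is in the mark-compact code, not in this diff:

// Model code only, not V8's RecordSlot: remember a slot only when its
// target lives on a page selected for evacuation (assumed policy).
#include <cstdio>
#include <unordered_map>
#include <vector>

struct Obj { int page_id; };  // fake "which page this object lives on"

static std::unordered_map<int, bool> evacuation_candidate;
static std::vector<Obj**> slot_buffer;

void RecordSlot(Obj** anchor, Obj** slot, Obj* target) {
  (void)anchor;  // the real code uses the anchor to identify the holder
  if (target != nullptr && evacuation_candidate[target->page_id]) {
    slot_buffer.push_back(slot);
  }
}

int main() {
  evacuation_candidate[1] = true;     // page 1 will be evacuated
  Obj on_candidate{1};
  Obj* field = &on_candidate;         // imagine: holder->field
  RecordSlot(&field, &field, field);  // VisitPointer passes (p, p)
  std::printf("%zu slot(s) recorded\n", slot_buffer.size());
}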

 bool IncrementalMarking::WorthActivating() {
 #ifndef DEBUG
   static const intptr_t kActivationThreshold = 8 * MB;
 #else
   // TODO(gc) consider setting this to some low level so that some
   // debug tests run with incremental marking and some without.
   static const intptr_t kActivationThreshold = 0;
 #endif

-  // TODO(gc) ISOLATES MERGE
   return FLAG_incremental_marking &&
       heap_->PromotedSpaceSize() > kActivationThreshold;
 }


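Note that the activation test keys off promoted old-space size, not total heap size, so a workload that only churns new space never starts incremental marking. A quick self-contained check of the predicate's shape, using the release threshold above (illustrative harness, not V8 code):

// Illustrative harness for the activation predicate's shape.
#include <cstdio>

const long MB = 1024 * 1024;

bool WorthActivating(bool flag_incremental, long promoted_bytes) {
  const long kActivationThreshold = 8 * MB;  // release-build value in the CL
  return flag_incremental && promoted_bytes > kActivationThreshold;
}

int main() {
  std::printf("%d\n", WorthActivating(true, 9 * MB));  // 1: above threshold
  std::printf("%d\n", WorthActivating(true, 4 * MB));  // 0: below threshold
}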
-static void PatchIncrementalMarkingRecordWriteStubs(bool enable) {
-  NumberDictionary* stubs = HEAP->code_stubs();
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  NumberDictionary* stubs = heap->code_stubs();

   int capacity = stubs->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = stubs->KeyAt(i);
     if (stubs->IsKey(k)) {
       uint32_t key = NumberToUint32(k);

       if (CodeStub::MajorKeyFromKey(key) ==
           CodeStub::RecordWrite) {
         Object* e = stubs->ValueAt(i);
         if (e->IsCode()) {
-          RecordWriteStub::Patch(Code::cast(e), enable);
+          RecordWriteStub::Patch(Code::cast(e), mode);
         }
       }
     }
   }
 }


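The patcher now takes an explicit RecordWriteStub::Mode instead of a bool, which is what lets StartMarking below pick between plain incremental marking and incremental marking with slot recording. A minimal standalone sketch of the three modes used in this CL and what each asks of the generated barrier (the real enum and Patch live in the RecordWriteStub declaration, which this diff does not show):

// Standalone sketch of an enum-driven stub patcher; the real
// RecordWriteStub::Patch rewrites generated machine code in place.
#include <cstdio>

struct RecordWriteStub {
  enum Mode {
    STORE_BUFFER_ONLY,       // no incremental marking running
    INCREMENTAL,             // incremental marking, no compaction
    INCREMENTAL_COMPACTION   // incremental marking plus slot recording
  };
};

void Patch(RecordWriteStub::Mode mode) {
  switch (mode) {
    case RecordWriteStub::STORE_BUFFER_ONLY:
      std::puts("barrier records into the store buffer only");
      break;
    case RecordWriteStub::INCREMENTAL:
      std::puts("barrier also calls RecordWriteFromCode");
      break;
    case RecordWriteStub::INCREMENTAL_COMPACTION:
      std::puts("barrier also calls RecordWriteForEvacuationFromCode");
      break;
  }
}

int main() { Patch(RecordWriteStub::INCREMENTAL_COMPACTION); }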
 void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
   if (marking_deque_memory_ == NULL) {
     marking_deque_memory_ = new VirtualMemory(4 * MB);
(...skipping 26 matching lines...)

   heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
 }


 void IncrementalMarking::StartMarking() {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
   }

+  is_compacting_ = !FLAG_never_compact &&
+      heap_->mark_compact_collector()->StartCompaction();
+
   state_ = MARKING;

-  PatchIncrementalMarkingRecordWriteStubs(true);
+  RecordWriteStub::Mode mode = is_compacting_ ?
+      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

   EnsureMarkingDequeIsCommitted();

   // Initialize marking stack.
   Address addr = static_cast<Address>(marking_deque_memory_->address());
   int size = marking_deque_memory_->size();
   if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
   marking_deque_.Initialize(addr, addr + size);

   // Clear markbits.
(...skipping 30 matching lines...)
   intptr_t current = marking_deque_.bottom();
   intptr_t mask = marking_deque_.mask();
   intptr_t limit = marking_deque_.top();
   HeapObject** array = marking_deque_.array();
   intptr_t new_top = current;

   Map* filler_map = heap_->one_pointer_filler_map();

   while (current != limit) {
     HeapObject* obj = array[current];
+    ASSERT(obj->IsHeapObject());
     current = ((current + 1) & mask);
     if (heap_->InNewSpace(obj)) {
       MapWord map_word = obj->map_word();
       if (map_word.IsForwardingAddress()) {
         HeapObject* dest = map_word.ToForwardingAddress();
         array[new_top] = dest;
         new_top = ((new_top + 1) & mask);
         ASSERT(new_top != marking_deque_.bottom());
         ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
       }
(...skipping 47 matching lines...)
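The (current + 1) & mask stepping in the loop above is the standard power-of-two ring-buffer trick: the deque capacity is a power of two, so masking with capacity - 1 wraps the index without a modulo. A self-contained illustration:

// Self-contained illustration of the power-of-two ring-buffer stepping
// used by the marking deque: with capacity 8, mask = 7 and the index wraps.
#include <cstdio>

int main() {
  const int capacity = 8;         // must be a power of two
  const int mask = capacity - 1;  // 0b0111
  int index = 6;
  for (int i = 0; i < 4; i++) {
    std::printf("%d ", index);    // prints: 6 7 0 1
    index = (index + 1) & mask;
  }
  std::printf("\n");
}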

 void IncrementalMarking::Abort() {
   if (IsStopped()) return;
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Aborting.\n");
   }
   heap_->new_space()->LowerInlineAllocationLimit(0);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   if (IsMarking()) {
-    PatchIncrementalMarkingRecordWriteStubs(false);
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
     DeactivateIncrementalWriteBarrier();
   }
   heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
   state_ = STOPPED;
+  is_compacting_ = false;
 }


 void IncrementalMarking::Finalize() {
   Hurry();
   state_ = STOPPED;
+  is_compacting_ = false;
   heap_->new_space()->LowerInlineAllocationLimit(0);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
-  PatchIncrementalMarkingRecordWriteStubs(false);
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
   DeactivateIncrementalWriteBarrier();
   ASSERT(marking_deque_.IsEmpty());
   heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
 }
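Abort and Finalize now undo exactly what StartMarking sets up: the stubs go back to STORE_BUFFER_ONLY and is_compacting_ is cleared on both exit paths. The lifecycle compresses to the following standalone sketch (illustrative only; names mirror the CL, the transitions are a summary, not real code):

// Illustrative summary of the mode lifecycle in this CL.
#include <cstdio>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

struct BarrierState {
  Mode mode = STORE_BUFFER_ONLY;
  bool is_compacting = false;

  void StartMarking(bool compaction_started) {
    is_compacting = compaction_started;  // decided before stubs are patched
    mode = is_compacting ? INCREMENTAL_COMPACTION : INCREMENTAL;
  }
  void AbortOrFinalize() {
    mode = STORE_BUFFER_ONLY;            // stubs restored
    is_compacting = false;               // cleared on both exit paths
  }
};

int main() {
  BarrierState s;
  s.StartMarking(true);
  std::printf("compacting: %d\n", s.is_compacting);  // 1
  s.AbortOrFinalize();
  std::printf("compacting: %d\n", s.is_compacting);  // 0
}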


 void IncrementalMarking::MarkingComplete() {
   state_ = COMPLETE;
   // We will set the stack guard to request a GC now. This will mean the rest
   // of the GC gets performed as soon as possible (we can't do a GC here in a
   // record-write context). If a few things get allocated between now and then
   // that shouldn't make us do a scavenge and keep being incremental, so we set
   // the should-hurry flag to indicate that there can't be much work left to do.
   set_should_hurry(true);
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
-  // TODO(gc) ISOLATES
-  ISOLATE->stack_guard()->RequestGC();
+  heap_->isolate()->stack_guard()->RequestGC();
 }


 void IncrementalMarking::Step(intptr_t allocated_bytes) {
   if (heap_->gc_state() != Heap::NOT_IN_GC) return;
   if (!FLAG_incremental_marking) return;
   if (!FLAG_incremental_marking_steps) return;

   allocated_ += allocated_bytes;

(...skipping 54 matching lines...)
   if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
     double end = OS::TimeCurrentMillis();
     double delta = (end - start);
     steps_took_ += delta;
     steps_took_since_last_gc_ += delta;
   }
 }


 } }  // namespace v8::internal
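For reviewers skimming the whole CL: with compaction active, one field write now does two jobs, preserving the marking invariant and remembering the slot for later pointer updates. A toy end-to-end model of that flow (standalone code, not V8; evacuation is simulated by copying one object and rewriting recorded slots):

// Toy end-to-end model of barrier + slot recording + evacuation fix-up.
#include <cstdio>
#include <vector>

struct Obj { int payload; };

static bool is_compacting = true;
static std::vector<Obj**> recorded_slots;

// Models the INCREMENTAL_COMPACTION barrier: record where the pointer lives.
void WriteField(Obj** slot, Obj* value) {
  *slot = value;
  if (is_compacting) recorded_slots.push_back(slot);
}

int main() {
  Obj from{42};               // object on an "evacuation candidate" page
  Obj to{0};                  // its post-evacuation location
  Obj* field = nullptr;       // imagine: holder->field

  WriteField(&field, &from);  // barrier records &field

  // "Evacuate": copy the object and rewrite every recorded slot.
  to = from;
  for (Obj** slot : recorded_slots) {
    if (*slot == &from) *slot = &to;
  }
  std::printf("field now reaches payload %d at the new location\n",
              field->payload);
}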