| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 32 matching lines...) |
| 43 marking_deque_memory_(NULL), | 43 marking_deque_memory_(NULL), |
| 44 steps_count_(0), | 44 steps_count_(0), |
| 45 steps_took_(0), | 45 steps_took_(0), |
| 46 longest_step_(0.0), | 46 longest_step_(0.0), |
| 47 old_generation_space_available_at_start_of_incremental_(0), | 47 old_generation_space_available_at_start_of_incremental_(0), |
| 48 old_generation_space_used_at_start_of_incremental_(0), | 48 old_generation_space_used_at_start_of_incremental_(0), |
| 49 steps_count_since_last_gc_(0), | 49 steps_count_since_last_gc_(0), |
| 50 steps_took_since_last_gc_(0), | 50 steps_took_since_last_gc_(0), |
| 51 should_hurry_(false), | 51 should_hurry_(false), |
| 52 allocation_marking_factor_(0), | 52 allocation_marking_factor_(0), |
| 53 allocated_(0) { | 53 allocated_(0), |
| 54 no_marking_scope_depth_(0) { |
| 54 } | 55 } |
| 55 | 56 |
| 56 | 57 |
| 57 void IncrementalMarking::TearDown() { | 58 void IncrementalMarking::TearDown() { |
| 58 delete marking_deque_memory_; | 59 delete marking_deque_memory_; |
| 59 } | 60 } |
| 60 | 61 |
| 61 | 62 |
| 62 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, | 63 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, |
| 63 Object* value, | 64 Object* value, |
| (...skipping 16 matching lines...) |
| 80 | 81 |
| 81 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, | 82 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, |
| 82 Object** slot, | 83 Object** slot, |
| 83 Isolate* isolate) { | 84 Isolate* isolate) { |
| 84 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 85 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
| 85 ASSERT(marking->is_compacting_); | 86 ASSERT(marking->is_compacting_); |
| 86 marking->RecordWrite(obj, slot, *slot); | 87 marking->RecordWrite(obj, slot, *slot); |
| 87 } | 88 } |
| 88 | 89 |
| 89 | 90 |
| 91 void IncrementalMarking::RecordCodeTargetPatch(Code* host, |
| 92 Address pc, |
| 93 HeapObject* value) { |
| 94 if (IsMarking()) { |
| 95 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 96 RecordWriteIntoCode(host, &rinfo, value); |
| 97 } |
| 98 } |
| 99 |
| 100 |
| 90 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { | 101 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { |
| 91 if (IsMarking()) { | 102 if (IsMarking()) { |
| 92 Code* host = heap_->isolate()->inner_pointer_to_code_cache()-> | 103 Code* host = heap_->isolate()->inner_pointer_to_code_cache()-> |
| 93 GcSafeFindCodeForInnerPointer(pc); | 104 GcSafeFindCodeForInnerPointer(pc); |
| 94 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 105 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 95 RecordWriteIntoCode(host, &rinfo, value); | 106 RecordWriteIntoCode(host, &rinfo, value); |
| 96 } | 107 } |
| 97 } | 108 } |
| 98 | 109 |
| 99 | 110 |
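Note on the two RecordCodeTargetPatch overloads above: the new Code*-taking variant lets a caller that already holds the host Code object skip the GcSafeFindCodeForInnerPointer() lookup that the address-only variant must perform. A minimal sketch of a hypothetical call site (PatchCallTarget is an illustration, not part of this CL):

    // Hypothetical patcher: it already knows which Code object it is
    // rewriting, so it hands `host` to the marker directly instead of
    // forcing a reverse lookup from the raw pc.
    void PatchCallTarget(Code* host, Address call_site, Code* new_target) {
      Assembler::set_target_address_at(call_site,
                                       new_target->instruction_start());
      host->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
          host, call_site, new_target);
    }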
| (...skipping 236 matching lines...) |
| 336 | 347 |
| 337 bool IncrementalMarking::WorthActivating() { | 348 bool IncrementalMarking::WorthActivating() { |
| 338 #ifndef DEBUG | 349 #ifndef DEBUG |
| 339 static const intptr_t kActivationThreshold = 8 * MB; | 350 static const intptr_t kActivationThreshold = 8 * MB; |
| 340 #else | 351 #else |
| 341 // TODO(gc) consider setting this to some low level so that some | 352 // TODO(gc) consider setting this to some low level so that some |
| 342 // debug tests run with incremental marking and some without. | 353 // debug tests run with incremental marking and some without. |
| 343 static const intptr_t kActivationThreshold = 0; | 354 static const intptr_t kActivationThreshold = 0; |
| 344 #endif | 355 #endif |
| 345 | 356 |
| 346 return FLAG_incremental_marking && | 357 return !FLAG_expose_gc && |
| 358 FLAG_incremental_marking && |
| 347 !Serializer::enabled() && | 359 !Serializer::enabled() && |
| 348 heap_->PromotedSpaceSize() > kActivationThreshold; | 360 heap_->PromotedSpaceSize() > kActivationThreshold; |
| 349 } | 361 } |
| 350 | 362 |
| 351 | 363 |
| 352 void IncrementalMarking::ActivateGeneratedStub(Code* stub) { | 364 void IncrementalMarking::ActivateGeneratedStub(Code* stub) { |
| 353 ASSERT(RecordWriteStub::GetMode(stub) == | 365 ASSERT(RecordWriteStub::GetMode(stub) == |
| 354 RecordWriteStub::STORE_BUFFER_ONLY); | 366 RecordWriteStub::STORE_BUFFER_ONLY); |
| 355 | 367 |
| 356 if (!IsMarking()) { | 368 if (!IsMarking()) { |
| (...skipping 97 matching lines...) |
| 454 // Initialize marking stack. | 466 // Initialize marking stack. |
| 455 Address addr = static_cast<Address>(marking_deque_memory_->address()); | 467 Address addr = static_cast<Address>(marking_deque_memory_->address()); |
| 456 size_t size = marking_deque_memory_->size(); | 468 size_t size = marking_deque_memory_->size(); |
| 457 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; | 469 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
| 458 marking_deque_.Initialize(addr, addr + size); | 470 marking_deque_.Initialize(addr, addr + size); |
| 459 | 471 |
| 460 ActivateIncrementalWriteBarrier(); | 472 ActivateIncrementalWriteBarrier(); |
| 461 | 473 |
| 462 #ifdef DEBUG | 474 #ifdef DEBUG |
| 463 // Marking bits are cleared by the sweeper. | 475 // Marking bits are cleared by the sweeper. |
| 464 heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); | 476 if (FLAG_verify_heap) { |
| 477 heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); |
| 478 } |
| 465 #endif | 479 #endif |
| 466 | 480 |
| 467 heap_->CompletelyClearInstanceofCache(); | 481 heap_->CompletelyClearInstanceofCache(); |
| 468 heap_->isolate()->compilation_cache()->MarkCompactPrologue(); | 482 heap_->isolate()->compilation_cache()->MarkCompactPrologue(); |
| 469 | 483 |
| 470 if (FLAG_cleanup_code_caches_at_gc) { | 484 if (FLAG_cleanup_code_caches_at_gc) { |
| 471 // We will mark cache black with a separate pass | 485 // We will mark cache black with a separate pass |
| 472 // when we finish marking. | 486 // when we finish marking. |
| 473 MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache()); | 487 MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache()); |
| 474 } | 488 } |
| (...skipping 210 matching lines...) |
| 685 !FLAG_incremental_marking || | 699 !FLAG_incremental_marking || |
| 686 !FLAG_incremental_marking_steps || | 700 !FLAG_incremental_marking_steps || |
| 687 (state_ != SWEEPING && state_ != MARKING)) { | 701 (state_ != SWEEPING && state_ != MARKING)) { |
| 688 return; | 702 return; |
| 689 } | 703 } |
| 690 | 704 |
| 691 allocated_ += allocated_bytes; | 705 allocated_ += allocated_bytes; |
| 692 | 706 |
| 693 if (allocated_ < kAllocatedThreshold) return; | 707 if (allocated_ < kAllocatedThreshold) return; |
| 694 | 708 |
| 709 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
| 710 |
| 695 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; | 711 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; |
| 696 | 712 |
| 697 double start = 0; | 713 double start = 0; |
| 698 | 714 |
| 699 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 715 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
| 700 start = OS::TimeCurrentMillis(); | 716 start = OS::TimeCurrentMillis(); |
| 701 } | 717 } |
| 702 | 718 |
| 703 if (state_ == SWEEPING) { | 719 if (state_ == SWEEPING) { |
| 704 if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && | 720 if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && |
| (...skipping 27 matching lines...) |
| 732 // We will mark cache black with a separate pass | 748 // We will mark cache black with a separate pass |
| 733 // when we finish marking. | 749 // when we finish marking. |
| 734 MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache()); | 750 MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache()); |
| 735 | 751 |
| 736 VisitGlobalContext(ctx, &marking_visitor); | 752 VisitGlobalContext(ctx, &marking_visitor); |
| 737 } else { | 753 } else { |
| 738 obj->IterateBody(map->instance_type(), size, &marking_visitor); | 754 obj->IterateBody(map->instance_type(), size, &marking_visitor); |
| 739 } | 755 } |
| 740 | 756 |
| 741 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); | 757 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); |
| 742 ASSERT(Marking::IsGrey(obj_mark_bit) || | 758 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || |
| 743 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); | 759 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); |
| 744 Marking::MarkBlack(obj_mark_bit); | 760 Marking::MarkBlack(obj_mark_bit); |
| 745 MemoryChunk::IncrementLiveBytes(obj->address(), size); | 761 MemoryChunk::IncrementLiveBytes(obj->address(), size); |
| 746 } | 762 } |
| 747 if (marking_deque_.IsEmpty()) MarkingComplete(); | 763 if (marking_deque_.IsEmpty()) MarkingComplete(); |
| 748 } | 764 } |
| 749 | 765 |
| 750 allocated_ = 0; | 766 allocated_ = 0; |
| 751 | 767 |
| 752 steps_count_++; | 768 steps_count_++; |
| 753 steps_count_since_last_gc_++; | 769 steps_count_since_last_gc_++; |
| (...skipping 55 matching lines...) |
| 809 bytes_rescanned_ = 0; | 825 bytes_rescanned_ = 0; |
| 810 allocation_marking_factor_ = kInitialAllocationMarkingFactor; | 826 allocation_marking_factor_ = kInitialAllocationMarkingFactor; |
| 811 } | 827 } |
| 812 | 828 |
| 813 | 829 |
| 814 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 830 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
| 815 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); | 831 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); |
| 816 } | 832 } |
| 817 | 833 |
| 818 } } // namespace v8::internal | 834 } } // namespace v8::internal |
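The early return added in Step() reads no_marking_scope_depth_, but the hunks shown here never set the counter. A plausible sketch, assuming Enter/Leave helpers declared in incremental-marking.h as part of this CL (the names and the RAII wrapper are assumptions, not confirmed by the diff above):

    // Assumed mutators for the new counter; Step() skips marking work
    // while any no-marking scope is open.
    void IncrementalMarking::EnterNoMarkingScope() { no_marking_scope_depth_++; }
    void IncrementalMarking::LeaveNoMarkingScope() { no_marking_scope_depth_--; }

    // Hypothetical RAII guard so a caller cannot forget the Leave call.
    class NoMarkingScope {
     public:
      explicit NoMarkingScope(IncrementalMarking* marking) : marking_(marking) {
        marking_->EnterNoMarkingScope();
      }
      ~NoMarkingScope() { marking_->LeaveNoMarkingScope(); }
     private:
      IncrementalMarking* marking_;
    };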