| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 726 matching lines...) |
| 737 state_ = COMPLETE; | 737 state_ = COMPLETE; |
| 738 // We will set the stack guard to request a GC now. This will mean the rest | 738 // We will set the stack guard to request a GC now. This will mean the rest |
| 739 // of the GC gets performed as soon as possible (we can't do a GC here in a | 739 // of the GC gets performed as soon as possible (we can't do a GC here in a |
| 740 // record-write context). If a few things get allocated between now and then, | 740 // record-write context). If a few things get allocated between now and then, |
| 741 // that shouldn't make us do a scavenge and keep being incremental, so we set | 741 // that shouldn't make us do a scavenge and keep being incremental, so we set |
| 742 // the should-hurry flag to indicate that there can't be much work left to do. | 742 // the should-hurry flag to indicate that there can't be much work left to do. |
| 743 set_should_hurry(true); | 743 set_should_hurry(true); |
| 744 if (FLAG_trace_incremental_marking) { | 744 if (FLAG_trace_incremental_marking) { |
| 745 PrintF("[IncrementalMarking] Complete (normal).\n"); | 745 PrintF("[IncrementalMarking] Complete (normal).\n"); |
| 746 } | 746 } |
| 747 heap_->isolate()->stack_guard()->RequestGC(); | 747 if (!heap_->idle_notification_will_schedule_next_gc()) { |
| 748 heap_->isolate()->stack_guard()->RequestGC(); |
| 749 } |
| 748 } | 750 } |
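Note on the hunk above: the NEW side skips the stack-guard GC request when an idle notification has already promised to schedule the next GC, so completing the marking does not raise a redundant interrupt. Below is a minimal, self-contained sketch of that guard pattern; the class shapes and wiring are simplified stand-ins, and only the `idle_notification_will_schedule_next_gc()` predicate comes from the diff itself.

```cpp
#include <cstdio>

// Simplified stand-ins for v8::internal::StackGuard and Heap.
class StackGuard {
 public:
  void RequestGC() { gc_requested_ = true; }  // serviced at the next stack check
  bool gc_requested() const { return gc_requested_; }
 private:
  bool gc_requested_ = false;
};

class Heap {
 public:
  explicit Heap(StackGuard* guard) : stack_guard_(guard) {}
  // The idle-notification path sets this when it commits to finishing the GC.
  void set_idle_notification_will_schedule_next_gc(bool v) { idle_will_gc_ = v; }
  bool idle_notification_will_schedule_next_gc() const { return idle_will_gc_; }

  void MarkingComplete() {
    // Only fall back to the stack-guard interrupt when nobody else has
    // promised to schedule the final pause -- the pattern added above.
    if (!idle_notification_will_schedule_next_gc()) {
      stack_guard_->RequestGC();
    }
  }
 private:
  StackGuard* stack_guard_;
  bool idle_will_gc_ = false;
};

int main() {
  StackGuard guard;
  Heap heap(&guard);
  heap.set_idle_notification_will_schedule_next_gc(true);
  heap.MarkingComplete();
  std::printf("gc requested: %d\n", guard.gc_requested());  // prints 0
}
```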
| 749 | 751 |
| 750 | 752 |
| 751 void IncrementalMarking::Step(intptr_t allocated_bytes) { | 753 void IncrementalMarking::Step(intptr_t allocated_bytes) { |
| 752 if (heap_->gc_state() != Heap::NOT_IN_GC || | 754 if (heap_->gc_state() != Heap::NOT_IN_GC || |
| 753 !FLAG_incremental_marking || | 755 !FLAG_incremental_marking || |
| 754 !FLAG_incremental_marking_steps || | 756 !FLAG_incremental_marking_steps || |
| 755 (state_ != SWEEPING && state_ != MARKING)) { | 757 (state_ != SWEEPING && state_ != MARKING)) { |
| 756 return; | 758 return; |
| 757 } | 759 } |
| 758 | 760 |
| 759 allocated_ += allocated_bytes; | 761 allocated_ += allocated_bytes; |
| 760 | 762 |
| 761 if (allocated_ < kAllocatedThreshold) return; | 763 if (allocated_ < kAllocatedThreshold) return; |
| 762 | 764 |
| 763 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; | 765 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
| 764 | 766 |
| 765 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; | 767 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; |
| 766 bytes_scanned_ += bytes_to_process; | 768 bytes_scanned_ += bytes_to_process; |
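Context for the throttle above: each step accumulates the bytes allocated since the last step, and once that passes `kAllocatedThreshold`, the marking budget is the accumulated total times `allocation_marking_factor_`, so marking work scales with the mutator's allocation rate. A hedged, self-contained sketch of that accounting follows; the constant values here are assumptions for illustration (the real ones live in incremental-marking.h).

```cpp
#include <cstdint>

// Assumed values, for illustration only.
static const intptr_t kAllocatedThreshold = 64 * 1024;  // bytes between steps
static const intptr_t kInitialAllocationMarkingFactor = 8;

struct StepThrottle {
  intptr_t allocated = 0;
  intptr_t marking_factor = kInitialAllocationMarkingFactor;

  // Returns 0 while allocations are still below the threshold; otherwise
  // returns a marking budget proportional to what was allocated, so the
  // marker keeps pace with the mutator.
  intptr_t Budget(intptr_t allocated_bytes) {
    allocated += allocated_bytes;
    if (allocated < kAllocatedThreshold) return 0;  // batch small allocations
    intptr_t bytes_to_process = allocated * marking_factor;
    allocated = 0;
    return bytes_to_process;
  }
};
```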
| 767 | 769 |
| 768 double start = 0; | 770 double start = 0; |
| 769 | 771 |
| 770 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 772 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
| 771 start = OS::TimeCurrentMillis(); | 773 start = OS::TimeCurrentMillis(); |
| 772 } | 774 } |
| 773 | 775 |
| 774 if (state_ == SWEEPING) { | 776 if (state_ == SWEEPING) { |
| 775 if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && | 777 if (heap_->AdvanceSweepers(bytes_to_process)) { |
| 776 heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) { | |
| 777 bytes_scanned_ = 0; | 778 bytes_scanned_ = 0; |
| 778 StartMarking(PREVENT_COMPACTION); | 779 StartMarking(PREVENT_COMPACTION); |
| 779 } | 780 } |
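The hunk above folds the two per-space `AdvanceSweeper` calls into a single `heap_->AdvanceSweepers(bytes_to_process)`. One plausible shape for that helper is sketched below, assuming it simply forwards the budget to both old spaces and reports completion only when both sweepers are done; this is a sketch, not necessarily the patch's actual body. Note that the OLD code's `&&` short-circuited, so the data-space sweeper only advanced once the pointer-space sweeper had finished; a non-short-circuiting helper advances both spaces every step.

```cpp
#include <cstdint>

// Minimal stand-in for a paged space with a lazy sweeper.
struct PagedSpace {
  intptr_t unswept_bytes = 0;
  // Sweeps up to step_size bytes; returns true once this space is fully swept.
  bool AdvanceSweeper(intptr_t step_size) {
    unswept_bytes = unswept_bytes > step_size ? unswept_bytes - step_size : 0;
    return unswept_bytes == 0;
  }
};

struct Heap {
  PagedSpace old_data_space_, old_pointer_space_;
  // Assumed shape of the new helper: advance BOTH sweepers each step,
  // deliberately avoiding && so the second sweeper is not starved.
  bool AdvanceSweepers(intptr_t step_size) {
    bool complete = old_data_space_.AdvanceSweeper(step_size);
    complete &= old_pointer_space_.AdvanceSweeper(step_size);
    return complete;
  }
};
```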
| 780 } else if (state_ == MARKING) { | 781 } else if (state_ == MARKING) { |
| 781 Map* filler_map = heap_->one_pointer_filler_map(); | 782 Map* filler_map = heap_->one_pointer_filler_map(); |
| 782 Map* global_context_map = heap_->global_context_map(); | 783 Map* global_context_map = heap_->global_context_map(); |
| 783 IncrementalMarkingMarkingVisitor marking_visitor(heap_, this); | 784 IncrementalMarkingMarkingVisitor marking_visitor(heap_, this); |
| 784 while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { | 785 while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { |
| 785 HeapObject* obj = marking_deque_.Pop(); | 786 HeapObject* obj = marking_deque_.Pop(); |
| 786 | 787 |
| (...skipping 124 matching lines...) |
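The body of the MARKING loop is elided in the view above, but its header shows the shape: pop grey objects off `marking_deque_` and charge their size against `bytes_to_process` until the budget runs out, leaving the rest of the deque for the next step. Below is a self-contained sketch of such a budgeted tri-color drain using generic types; nothing about V8's internals is assumed beyond the loop condition visible above.

```cpp
#include <cstdint>
#include <deque>

// Generic grey object: size in bytes plus outgoing references.
struct Obj {
  intptr_t size;
  std::deque<Obj*> children;
  bool black = false;  // marked-and-scanned
};

// Drain grey objects until the step budget is spent; leftovers stay queued
// for the next step, which is what makes the marking incremental.
void MarkStep(std::deque<Obj*>& marking_deque, intptr_t bytes_to_process) {
  while (!marking_deque.empty() && bytes_to_process > 0) {
    Obj* obj = marking_deque.front();
    marking_deque.pop_front();
    if (obj->black) continue;       // already scanned (e.g. queued twice)
    obj->black = true;              // grey -> black
    bytes_to_process -= obj->size;  // charge the object against the budget
    for (Obj* child : obj->children) {
      if (!child->black) marking_deque.push_back(child);  // shade grey
    }
  }
}
```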
| 911 allocation_marking_factor_ = kInitialAllocationMarkingFactor; | 912 allocation_marking_factor_ = kInitialAllocationMarkingFactor; |
| 912 bytes_scanned_ = 0; | 913 bytes_scanned_ = 0; |
| 913 } | 914 } |
| 914 | 915 |
| 915 | 916 |
| 916 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 917 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
| 917 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); | 918 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); |
| 918 } | 919 } |
| 919 | 920 |
| 920 } } // namespace v8::internal | 921 } } // namespace v8::internal |