Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(52)

Side by Side Diff: src/heap/heap.cc

Issue 1314843010: Version 4.6.85.10 (cherry-pick) (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@4.6
Patch Set: Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/once.h" 10 #include "src/base/once.h"
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
126 crankshaft_codegen_bytes_generated_(0), 126 crankshaft_codegen_bytes_generated_(0),
127 new_space_allocation_counter_(0), 127 new_space_allocation_counter_(0),
128 old_generation_allocation_counter_(0), 128 old_generation_allocation_counter_(0),
129 old_generation_size_at_last_gc_(0), 129 old_generation_size_at_last_gc_(0),
130 gcs_since_last_deopt_(0), 130 gcs_since_last_deopt_(0),
131 allocation_sites_scratchpad_length_(0), 131 allocation_sites_scratchpad_length_(0),
132 ring_buffer_full_(false), 132 ring_buffer_full_(false),
133 ring_buffer_end_(0), 133 ring_buffer_end_(0),
134 promotion_queue_(this), 134 promotion_queue_(this),
135 configured_(false), 135 configured_(false),
136 current_gc_flags_(Heap::kNoGCFlags),
136 external_string_table_(this), 137 external_string_table_(this),
137 chunks_queued_for_free_(NULL), 138 chunks_queued_for_free_(NULL),
138 gc_callbacks_depth_(0), 139 gc_callbacks_depth_(0),
139 deserialization_complete_(false), 140 deserialization_complete_(false),
140 concurrent_sweeping_enabled_(false), 141 concurrent_sweeping_enabled_(false),
141 strong_roots_list_(NULL) { 142 strong_roots_list_(NULL) {
142 // Allow build-time customization of the max semispace size. Building 143 // Allow build-time customization of the max semispace size. Building
143 // V8 with snapshots and a non-default max semispace size is much 144 // V8 with snapshots and a non-default max semispace size is much
144 // easier if you can define it as part of the build environment. 145 // easier if you can define it as part of the build environment.
145 #if defined(V8_MAX_SEMISPACE_SIZE) 146 #if defined(V8_MAX_SEMISPACE_SIZE)
(...skipping 591 matching lines...) Expand 10 before | Expand all | Expand 10 after
737 } 738 }
738 // We must not compact the weak fixed list here, as we may be in the middle 739 // We must not compact the weak fixed list here, as we may be in the middle
739 // of writing to it, when the GC triggered. Instead, we reset the root value. 740 // of writing to it, when the GC triggered. Instead, we reset the root value.
740 set_weak_stack_trace_list(Smi::FromInt(0)); 741 set_weak_stack_trace_list(Smi::FromInt(0));
741 } 742 }
742 743
743 744
744 void Heap::HandleGCRequest() { 745 void Heap::HandleGCRequest() {
745 if (incremental_marking()->request_type() == 746 if (incremental_marking()->request_type() ==
746 IncrementalMarking::COMPLETE_MARKING) { 747 IncrementalMarking::COMPLETE_MARKING) {
747 CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt", 748 CollectAllGarbage(current_gc_flags(), "GC interrupt",
748 incremental_marking()->CallbackFlags()); 749 incremental_marking()->CallbackFlags());
749 return; 750 return;
750 } 751 }
751 DCHECK(FLAG_overapproximate_weak_closure); 752 DCHECK(FLAG_overapproximate_weak_closure);
752 if (!incremental_marking()->weak_closure_was_overapproximated()) { 753 if (!incremental_marking()->weak_closure_was_overapproximated()) {
753 OverApproximateWeakClosure("GC interrupt"); 754 OverApproximateWeakClosure("GC interrupt");
754 } 755 }
755 } 756 }
756 757
757 758
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
790 } 791 }
791 } 792 }
792 } 793 }
793 794
794 795
795 void Heap::CollectAllGarbage(int flags, const char* gc_reason, 796 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
796 const v8::GCCallbackFlags gc_callback_flags) { 797 const v8::GCCallbackFlags gc_callback_flags) {
797 // Since we are ignoring the return value, the exact choice of space does 798 // Since we are ignoring the return value, the exact choice of space does
798 // not matter, so long as we do not specify NEW_SPACE, which would not 799 // not matter, so long as we do not specify NEW_SPACE, which would not
799 // cause a full GC. 800 // cause a full GC.
800 mark_compact_collector_.SetFlags(flags); 801 set_current_gc_flags(flags);
801 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); 802 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
802 mark_compact_collector_.SetFlags(kNoGCFlags); 803 set_current_gc_flags(kNoGCFlags);
803 } 804 }
804 805
805 806
806 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { 807 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
807 // Since we are ignoring the return value, the exact choice of space does 808 // Since we are ignoring the return value, the exact choice of space does
808 // not matter, so long as we do not specify NEW_SPACE, which would not 809 // not matter, so long as we do not specify NEW_SPACE, which would not
809 // cause a full GC. 810 // cause a full GC.
810 // Major GC would invoke weak handle callbacks on weakly reachable 811 // Major GC would invoke weak handle callbacks on weakly reachable
811 // handles, but won't collect weakly reachable objects until next 812 // handles, but won't collect weakly reachable objects until next
812 // major GC. Therefore if we collect aggressively and weak handle callback 813 // major GC. Therefore if we collect aggressively and weak handle callback
813 // has been invoked, we rerun major GC to release objects which become 814 // has been invoked, we rerun major GC to release objects which become
814 // garbage. 815 // garbage.
815 // Note: as weak callbacks can execute arbitrary code, we cannot 816 // Note: as weak callbacks can execute arbitrary code, we cannot
816 // hope that eventually there will be no weak callbacks invocations. 817 // hope that eventually there will be no weak callbacks invocations.
817 // Therefore stop recollecting after several attempts. 818 // Therefore stop recollecting after several attempts.
818 if (isolate()->concurrent_recompilation_enabled()) { 819 if (isolate()->concurrent_recompilation_enabled()) {
819 // The optimizing compiler may be unnecessarily holding on to memory. 820 // The optimizing compiler may be unnecessarily holding on to memory.
820 DisallowHeapAllocation no_recursive_gc; 821 DisallowHeapAllocation no_recursive_gc;
821 isolate()->optimizing_compile_dispatcher()->Flush(); 822 isolate()->optimizing_compile_dispatcher()->Flush();
822 } 823 }
823 isolate()->ClearSerializerData(); 824 isolate()->ClearSerializerData();
824 mark_compact_collector()->SetFlags(kMakeHeapIterableMask | 825 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
825 kReduceMemoryFootprintMask);
826 isolate_->compilation_cache()->Clear(); 826 isolate_->compilation_cache()->Clear();
827 const int kMaxNumberOfAttempts = 7; 827 const int kMaxNumberOfAttempts = 7;
828 const int kMinNumberOfAttempts = 2; 828 const int kMinNumberOfAttempts = 2;
829 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { 829 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
830 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL, 830 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
831 v8::kGCCallbackFlagForced) && 831 v8::kGCCallbackFlagForced) &&
832 attempt + 1 >= kMinNumberOfAttempts) { 832 attempt + 1 >= kMinNumberOfAttempts) {
833 break; 833 break;
834 } 834 }
835 } 835 }
836 mark_compact_collector()->SetFlags(kNoGCFlags); 836 set_current_gc_flags(kNoGCFlags);
837 new_space_.Shrink(); 837 new_space_.Shrink();
838 UncommitFromSpace(); 838 UncommitFromSpace();
839 } 839 }
840 840
841 841
842 void Heap::EnsureFillerObjectAtTop() { 842 void Heap::EnsureFillerObjectAtTop() {
843 // There may be an allocation memento behind every object in new space. 843 // There may be an allocation memento behind every object in new space.
844 // If we evacuate a not full new space or if we are on the last page of 844 // If we evacuate a not full new space or if we are on the last page of
845 // the new space, then there may be uninitialized memory behind the top 845 // the new space, then there may be uninitialized memory behind the top
846 // pointer of the new space page. We store a filler object there to 846 // pointer of the new space page. We store a filler object there to
(...skipping 27 matching lines...) Expand all
874 #endif 874 #endif
875 875
876 EnsureFillerObjectAtTop(); 876 EnsureFillerObjectAtTop();
877 877
878 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { 878 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
879 if (FLAG_trace_incremental_marking) { 879 if (FLAG_trace_incremental_marking) {
880 PrintF("[IncrementalMarking] Scavenge during marking.\n"); 880 PrintF("[IncrementalMarking] Scavenge during marking.\n");
881 } 881 }
882 } 882 }
883 883
884 if (collector == MARK_COMPACTOR && 884 if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
885 !mark_compact_collector()->finalize_incremental_marking() && 885 !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
886 !mark_compact_collector()->abort_incremental_marking() &&
887 !incremental_marking()->IsStopped() &&
888 !incremental_marking()->should_hurry() && FLAG_incremental_marking) { 886 !incremental_marking()->should_hurry() && FLAG_incremental_marking) {
889 // Make progress in incremental marking. 887 // Make progress in incremental marking.
890 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; 888 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
891 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, 889 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
892 IncrementalMarking::NO_GC_VIA_STACK_GUARD); 890 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
893 if (!incremental_marking()->IsComplete() && 891 if (!incremental_marking()->IsComplete() &&
894 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) { 892 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
895 if (FLAG_trace_incremental_marking) { 893 if (FLAG_trace_incremental_marking) {
896 PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); 894 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
897 } 895 }
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
948 tracer()->Stop(collector); 946 tracer()->Stop(collector);
949 } 947 }
950 948
951 if (collector == MARK_COMPACTOR && 949 if (collector == MARK_COMPACTOR &&
952 (gc_callback_flags & kGCCallbackFlagForced) != 0) { 950 (gc_callback_flags & kGCCallbackFlagForced) != 0) {
953 isolate()->CountUsage(v8::Isolate::kForcedGC); 951 isolate()->CountUsage(v8::Isolate::kForcedGC);
954 } 952 }
955 953
956 // Start incremental marking for the next cycle. The heap snapshot 954 // Start incremental marking for the next cycle. The heap snapshot
957 // generator needs incremental marking to stay off after it aborted. 955 // generator needs incremental marking to stay off after it aborted.
958 if (!mark_compact_collector()->abort_incremental_marking() && 956 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
959 incremental_marking()->IsStopped() &&
960 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { 957 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
961 incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); 958 incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
962 } 959 }
963 960
964 return next_gc_likely_to_collect_more; 961 return next_gc_likely_to_collect_more;
965 } 962 }
966 963
967 964
968 int Heap::NotifyContextDisposed(bool dependant_context) { 965 int Heap::NotifyContextDisposed(bool dependant_context) {
969 if (!dependant_context) { 966 if (!dependant_context) {
(...skipping 3784 matching lines...) Expand 10 before | Expand all | Expand 10 after
4754 // Fragmentation is high if committed > 2 * used + kSlack. 4751 // Fragmentation is high if committed > 2 * used + kSlack.
4755 // Rewrite the expression to avoid overflow. 4752 // Rewrite the expression to avoid overflow.
4756 return committed - used > used + kSlack; 4753 return committed - used > used + kSlack;
4757 } 4754 }
4758 4755
4759 4756
4760 void Heap::ReduceNewSpaceSize() { 4757 void Heap::ReduceNewSpaceSize() {
4761 // TODO(ulan): Unify this constant with the similar constant in 4758 // TODO(ulan): Unify this constant with the similar constant in
4762 // GCIdleTimeHandler once the change is merged to 4.5. 4759 // GCIdleTimeHandler once the change is merged to 4.5.
4763 static const size_t kLowAllocationThroughput = 1000; 4760 static const size_t kLowAllocationThroughput = 1000;
4764 size_t allocation_throughput = 4761 const size_t allocation_throughput =
4765 tracer()->CurrentAllocationThroughputInBytesPerMillisecond(); 4762 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4766 if (FLAG_predictable || allocation_throughput == 0) return; 4763
4767 if (allocation_throughput < kLowAllocationThroughput) { 4764 if (FLAG_predictable) return;
4765
4766 if (ShouldReduceMemory() ||
4767 ((allocation_throughput != 0) &&
4768 (allocation_throughput < kLowAllocationThroughput))) {
4768 new_space_.Shrink(); 4769 new_space_.Shrink();
4769 UncommitFromSpace(); 4770 UncommitFromSpace();
4770 } 4771 }
4771 } 4772 }
4772 4773
4773 4774
4774 bool Heap::TryFinalizeIdleIncrementalMarking( 4775 bool Heap::TryFinalizeIdleIncrementalMarking(
4775 double idle_time_in_ms, size_t size_of_objects, 4776 double idle_time_in_ms, size_t size_of_objects,
4776 size_t final_incremental_mark_compact_speed_in_bytes_per_ms) { 4777 size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
4777 if (FLAG_overapproximate_weak_closure && 4778 if (FLAG_overapproximate_weak_closure &&
4778 (incremental_marking()->IsReadyToOverApproximateWeakClosure() || 4779 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4779 (!incremental_marking()->weak_closure_was_overapproximated() && 4780 (!incremental_marking()->weak_closure_was_overapproximated() &&
4780 mark_compact_collector_.marking_deque()->IsEmpty() && 4781 mark_compact_collector_.marking_deque()->IsEmpty() &&
4781 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( 4782 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
4782 static_cast<size_t>(idle_time_in_ms))))) { 4783 static_cast<size_t>(idle_time_in_ms))))) {
4783 OverApproximateWeakClosure( 4784 OverApproximateWeakClosure(
4784 "Idle notification: overapproximate weak closure"); 4785 "Idle notification: overapproximate weak closure");
4785 return true; 4786 return true;
4786 } else if (incremental_marking()->IsComplete() || 4787 } else if (incremental_marking()->IsComplete() ||
4787 (mark_compact_collector_.marking_deque()->IsEmpty() && 4788 (mark_compact_collector_.marking_deque()->IsEmpty() &&
4788 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( 4789 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
4789 static_cast<size_t>(idle_time_in_ms), size_of_objects, 4790 static_cast<size_t>(idle_time_in_ms), size_of_objects,
4790 final_incremental_mark_compact_speed_in_bytes_per_ms))) { 4791 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4791 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); 4792 CollectAllGarbage(current_gc_flags(),
4793 "idle notification: finalize incremental");
4792 return true; 4794 return true;
4793 } 4795 }
4794 return false; 4796 return false;
4795 } 4797 }
4796 4798
4797 4799
4798 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { 4800 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
4799 GCIdleTimeHandler::HeapState heap_state; 4801 GCIdleTimeHandler::HeapState heap_state;
4800 heap_state.contexts_disposed = contexts_disposed_; 4802 heap_state.contexts_disposed = contexts_disposed_;
4801 heap_state.contexts_disposal_rate = 4803 heap_state.contexts_disposal_rate =
(...skipping 840 matching lines...) Expand 10 before | Expand all | Expand 10 after
5642 // memory-constrained devices. 5644 // memory-constrained devices.
5643 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice || 5645 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
5644 FLAG_optimize_for_size) { 5646 FLAG_optimize_for_size) {
5645 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained); 5647 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5646 } 5648 }
5647 5649
5648 if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) { 5650 if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
5649 factor = Min(factor, kConservativeHeapGrowingFactor); 5651 factor = Min(factor, kConservativeHeapGrowingFactor);
5650 } 5652 }
5651 5653
5652 if (FLAG_stress_compaction || 5654 if (FLAG_stress_compaction || ShouldReduceMemory()) {
5653 mark_compact_collector()->reduce_memory_footprint_) {
5654 factor = kMinHeapGrowingFactor; 5655 factor = kMinHeapGrowingFactor;
5655 } 5656 }
5656 5657
5657 old_generation_allocation_limit_ = 5658 old_generation_allocation_limit_ =
5658 CalculateOldGenerationAllocationLimit(factor, old_gen_size); 5659 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5659 5660
5660 if (FLAG_trace_gc_verbose) { 5661 if (FLAG_trace_gc_verbose) {
5661 PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX 5662 PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
5662 "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", 5663 "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
5663 old_gen_size / KB, old_generation_allocation_limit_ / KB, 5664 old_gen_size / KB, old_generation_allocation_limit_ / KB,
(...skipping 1224 matching lines...) Expand 10 before | Expand all | Expand 10 after
6888 *object_type = "CODE_TYPE"; \ 6889 *object_type = "CODE_TYPE"; \
6889 *object_sub_type = "CODE_AGE/" #name; \ 6890 *object_sub_type = "CODE_AGE/" #name; \
6890 return true; 6891 return true;
6891 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) 6892 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6892 #undef COMPARE_AND_RETURN_NAME 6893 #undef COMPARE_AND_RETURN_NAME
6893 } 6894 }
6894 return false; 6895 return false;
6895 } 6896 }
6896 } // namespace internal 6897 } // namespace internal
6897 } // namespace v8 6898 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698