Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1308363002: Revert of Version 4.5.103.23 (cherry-pick) (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@4.5
Patch Set: Created 5 years, 4 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 757 matching lines...)
   }
   // We must not compact the weak fixed list here, as we may be in the middle
   // of writing to it, when the GC triggered. Instead, we reset the root value.
   set_weak_stack_trace_list(Smi::FromInt(0));
 }


 void Heap::HandleGCRequest() {
   if (incremental_marking()->request_type() ==
       IncrementalMarking::COMPLETE_MARKING) {
-    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt",
-                      incremental_marking()->CallbackFlags());
+    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
     return;
   }
   DCHECK(FLAG_overapproximate_weak_closure);
   if (!incremental_marking()->weak_closure_was_overapproximated()) {
     OverApproximateWeakClosure("GC interrupt");
   }
 }


 void Heap::OverApproximateWeakClosure(const char* gc_reason) {
(...skipping 189 matching lines...)
   if (collector == MARK_COMPACTOR &&
       (gc_callback_flags & kGCCallbackFlagForced) != 0) {
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }

   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
   if (!mark_compact_collector()->abort_incremental_marking() &&
       incremental_marking()->IsStopped() &&
       incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+    incremental_marking()->Start(kNoGCFlags);
   }

   return next_gc_likely_to_collect_more;
 }


 int Heap::NotifyContextDisposed(bool dependant_context) {
   if (!dependant_context) {
     tracer()->ResetSurvivalEvents();
     old_generation_size_configured_ = false;
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compile_dispatcher()->Flush();
   }
   AgeInlineCaches();
   set_retained_maps(ArrayList::cast(empty_fixed_array()));
   tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
   MemoryReducer::Event event;
   event.type = MemoryReducer::kContextDisposed;
   event.time_ms = MonotonicallyIncreasingTimeInMs();
   memory_reducer_.NotifyContextDisposed(event);
   return ++contexts_disposed_;
 }


-void Heap::StartIncrementalMarking(int gc_flags,
-                                   const GCCallbackFlags gc_callback_flags,
-                                   const char* reason) {
-  DCHECK(incremental_marking()->IsStopped());
-  incremental_marking()->Start(gc_flags, gc_callback_flags, reason);
-}
-
-
 void Heap::StartIdleIncrementalMarking() {
   gc_idle_time_handler_.ResetNoProgressCounter();
-  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
-                          "idle");
+  incremental_marking()->Start(kReduceMemoryFootprintMask);
 }


 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                         int len) {
   if (len == 0) return;

   DCHECK(array->map() != fixed_cow_array_map());
   Object** dst_objects = array->data_start() + dst_index;
   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
(...skipping 3727 matching lines...)
   if (!IsHeapIterable()) {
     CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
   }
   DCHECK(IsHeapIterable());
 }

-static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
-  const double kMinMutatorUtilization = 0.0;
-  const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
-  if (mutator_speed == 0) return kMinMutatorUtilization;
-  if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
-  // Derivation:
-  // mutator_utilization = mutator_time / (mutator_time + gc_time)
-  // mutator_time = 1 / mutator_speed
-  // gc_time = 1 / gc_speed
-  // mutator_utilization = (1 / mutator_speed) /
-  //                       (1 / mutator_speed + 1 / gc_speed)
-  // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
-  return gc_speed / (mutator_speed + gc_speed);
-}
-
-
-double Heap::YoungGenerationMutatorUtilization() {
-  double mutator_speed = static_cast<double>(
-      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
-  double gc_speed = static_cast<double>(
-      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
-  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
-  if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Young generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
-  }
-  return result;
-}
-
-
-double Heap::OldGenerationMutatorUtilization() {
-  double mutator_speed = static_cast<double>(
-      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
-  double gc_speed = static_cast<double>(
-      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
-  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
-  if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Old generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
-  }
-  return result;
-}
-
-
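The deleted ComputeMutatorUtilization above folds a per-byte time derivation into a single speed ratio. As a sanity check of that algebra, here is a minimal standalone C++ sketch of the same formula; the speeds are illustrative numbers, not measurements from this CL (V8 feeds in tracer throughput values):

    // Minimal check of mutator_utilization = gc_speed / (mutator_speed + gc_speed).
    #include <cstdio>

    static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
      if (mutator_speed == 0) return 0.0;    // no allocation data yet
      if (gc_speed == 0) gc_speed = 200000;  // conservative fallback, as above
      // Per byte: mutator_time = 1/m, gc_time = 1/g, so
      // utilization = (1/m) / (1/m + 1/g) = g / (m + g).
      return gc_speed / (mutator_speed + gc_speed);
    }

    int main() {
      // Allocating at 1 MB/ms against 10 MB/ms of scavenging: ~0.909.
      std::printf("%.3f\n", ComputeMutatorUtilization(1e6, 1e7));
      // The 0.993 threshold used below is only crossed once allocation drops
      // under roughly 0.7% of GC speed: g/(m+g) > 0.993  <=>  m < g*0.007/0.993.
      return 0;
    }

Read this way, the "low allocation rate" predicates that follow mean the mutator would keep roughly 99.3% of the time for itself.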
 bool Heap::HasLowYoungGenerationAllocationRate() {
   const double high_mutator_utilization = 0.993;
-  return YoungGenerationMutatorUtilization() > high_mutator_utilization;
+  double mutator_speed = static_cast<double>(
+      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+  double gc_speed = static_cast<double>(
+      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
 }


 bool Heap::HasLowOldGenerationAllocationRate() {
   const double high_mutator_utilization = 0.993;
-  return OldGenerationMutatorUtilization() > high_mutator_utilization;
+  double mutator_speed = static_cast<double>(
+      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
+  double gc_speed = static_cast<double>(
+      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
 }


 bool Heap::HasLowAllocationRate() {
   return HasLowYoungGenerationAllocationRate() &&
          HasLowOldGenerationAllocationRate();
 }


 bool Heap::HasHighFragmentation() {
   intptr_t used = PromotedSpaceSizeOfObjects();
   intptr_t committed = CommittedOldGenerationMemory();
   return HasHighFragmentation(used, committed);
 }


 bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
   const intptr_t kSlack = 16 * MB;
   // Fragmentation is high if committed > 2 * used + kSlack.
   // Rewrite the expression to avoid overflow.
   return committed - used > used + kSlack;
 }

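The overflow note above is worth spelling out: on a 32-bit build, 2 * used + kSlack can wrap for heaps past roughly 1 GB, while committed - used stays in range whenever committed >= used, which the accounting guarantees. A small standalone sketch with hypothetical values (not from this CL):

    // Why the rewritten comparison avoids signed overflow on 32-bit words.
    #include <cstdint>
    #include <cstdio>

    int main() {
      typedef int32_t word_t;  // stand-in for intptr_t on a 32-bit build
      const word_t kSlack = 16 * 1024 * 1024;
      word_t used = 1200 * 1024 * 1024;       // hypothetical 1.2 GB live
      word_t committed = 2000 * 1024 * 1024;  // hypothetical ~2 GB committed
      // Naive form: 2 * used + kSlack would be ~2.4 GB, overflowing word_t
      // (signed overflow is UB, so we must not even evaluate it).
      // Rewritten form from the diff; used + kSlack still fits here, which
      // holds for any plausible heap size:
      bool high = committed - used > used + kSlack;
      std::printf("high fragmentation: %s\n", high ? "yes" : "no");  // "no"
      return 0;
    }

Here committed - used is 800 MB against used + kSlack of ~1216 MB, so the predicate correctly reports no excessive fragmentation.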
 void Heap::ReduceNewSpaceSize() {
-  // TODO(ulan): Unify this constant with the similar constant in
-  // GCIdleTimeHandler once the change is merged to 4.5.
-  static const size_t kLowAllocationThroughput = 1000;
-  size_t allocation_throughput =
-      tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
-  if (FLAG_predictable || allocation_throughput == 0) return;
-  if (allocation_throughput < kLowAllocationThroughput) {
+  if (!FLAG_predictable && HasLowAllocationRate()) {
     new_space_.Shrink();
     UncommitFromSpace();
   }
 }

-
-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
-  if (FLAG_overapproximate_weak_closure &&
-      (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
-       (!incremental_marking()->weak_closure_was_overapproximated() &&
-        mark_compact_collector_.marking_deque()->IsEmpty()))) {
-    OverApproximateWeakClosure(comment);
-  } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector_.marking_deque()->IsEmpty())) {
-    CollectAllGarbage(kNoGCFlags, comment);
-  }
-}
-

 bool Heap::TryFinalizeIdleIncrementalMarking(
     double idle_time_in_ms, size_t size_of_objects,
     size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
   if (FLAG_overapproximate_weak_closure &&
       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
        (!incremental_marking()->weak_closure_was_overapproximated() &&
         mark_compact_collector_.marking_deque()->IsEmpty() &&
         gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
             static_cast<size_t>(idle_time_in_ms))))) {
(...skipping 33 matching lines...)
   heap_state.scavenge_speed_in_bytes_per_ms =
       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
   heap_state.used_new_space_size = new_space_.Size();
   heap_state.new_space_capacity = new_space_.Capacity();
   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
   return heap_state;
 }


-double Heap::AdvanceIncrementalMarking(
-    intptr_t step_size_in_bytes, double deadline_in_ms,
-    IncrementalMarking::StepActions step_actions) {
-  DCHECK(!incremental_marking()->IsStopped());
-
-  if (step_size_in_bytes == 0) {
-    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
-        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
-        static_cast<size_t>(
-            tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
-  }
-
-  double remaining_time_in_ms = 0.0;
-  do {
-    incremental_marking()->Step(
-        step_size_in_bytes, step_actions.completion_action,
-        step_actions.force_marking, step_actions.force_completion);
-    remaining_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
-  } while (remaining_time_in_ms >=
-               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
-           !incremental_marking()->IsComplete() &&
-           !mark_compact_collector_.marking_deque()->IsEmpty());
-  return remaining_time_in_ms;
-}
-
-
 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
                                  GCIdleTimeHandler::HeapState heap_state,
                                  double deadline_in_ms) {
   bool result = false;
   switch (action.type) {
     case DONE:
       result = true;
       break;
     case DO_INCREMENTAL_MARKING: {
-      const double remaining_idle_time_in_ms =
-          AdvanceIncrementalMarking(action.parameter, deadline_in_ms,
-                                    IncrementalMarking::IdleStepActions());
+      DCHECK(!incremental_marking()->IsStopped());
+      double remaining_idle_time_in_ms = 0.0;
+      do {
+        incremental_marking()->Step(
+            action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+            IncrementalMarking::FORCE_MARKING,
+            IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+        remaining_idle_time_in_ms =
+            deadline_in_ms - MonotonicallyIncreasingTimeInMs();
+      } while (remaining_idle_time_in_ms >=
+                   2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+               !incremental_marking()->IsComplete() &&
+               !mark_compact_collector_.marking_deque()->IsEmpty());
       if (remaining_idle_time_in_ms > 0.0) {
         action.additional_work = TryFinalizeIdleIncrementalMarking(
             remaining_idle_time_in_ms, heap_state.size_of_objects,
             heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
       }
       break;
     }
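Both sides of this hunk implement the same deadline-bounded work pattern: do one bounded step, re-read the clock, and continue only while at least twice a typical step's duration remains, so a step started near the deadline is unlikely to overrun it. A minimal standalone sketch of the pattern follows; the step cost, idle budget, and work counter are hypothetical stand-ins, not V8's:

    #include <chrono>
    #include <cstdio>

    // Hypothetical stand-in for GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs.
    static const double kTypicalStepTimeMs = 1.0;

    static double NowMs() {
      using namespace std::chrono;
      return duration<double, std::milli>(
                 steady_clock::now().time_since_epoch()).count();
    }

    // Pretend unit of work standing in for one incremental marking step.
    static bool DoOneStep(int* work_left) {
      *work_left -= 1;
      return *work_left <= 0;  // true when marking would be complete
    }

    int main() {
      int work_left = 100000;
      const double deadline_ms = NowMs() + 5.0;  // a 5 ms idle budget
      double remaining_ms = 0.0;
      bool done = false;
      do {
        done = DoOneStep(&work_left);
        remaining_ms = deadline_ms - NowMs();
        // Continue only if another step fits with a 2x safety margin.
      } while (remaining_ms >= 2.0 * kTypicalStepTimeMs && !done);
      std::printf("remaining %.3f ms, work left %d\n", remaining_ms, work_left);
      return 0;
    }

The revert simply inlines this loop back into PerformIdleTimeAction instead of keeping it factored out in AdvanceIncrementalMarking.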
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
(...skipping 2004 matching lines...)
6987 *object_type = "CODE_TYPE"; \ 6906 *object_type = "CODE_TYPE"; \
6988 *object_sub_type = "CODE_AGE/" #name; \ 6907 *object_sub_type = "CODE_AGE/" #name; \
6989 return true; 6908 return true;
6990 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) 6909 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6991 #undef COMPARE_AND_RETURN_NAME 6910 #undef COMPARE_AND_RETURN_NAME
6992 } 6911 }
6993 return false; 6912 return false;
6994 } 6913 }
6995 } // namespace internal 6914 } // namespace internal
6996 } // namespace v8 6915 } // namespace v8