Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(93)

Side by Side Diff: src/heap/heap.cc

Issue 1090963002: Use smaller heap growing factor in idle notification to start incremental marking when there is idle time. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/once.h" 10 #include "src/base/once.h"
(...skipping 1113 matching lines...) Expand 10 before | Expand all | Expand 10 after
1124 incremental_marking()->NotifyOfHighPromotionRate(); 1124 incremental_marking()->NotifyOfHighPromotionRate();
1125 } 1125 }
1126 1126
1127 if (collector == MARK_COMPACTOR) { 1127 if (collector == MARK_COMPACTOR) {
1128 // Perform mark-sweep with optional compaction. 1128 // Perform mark-sweep with optional compaction.
1129 MarkCompact(); 1129 MarkCompact();
1130 sweep_generation_++; 1130 sweep_generation_++;
1131 // Temporarily set the limit for case when PostGarbageCollectionProcessing 1131 // Temporarily set the limit for case when PostGarbageCollectionProcessing
1132 // allocates and triggers GC. The real limit is set at after 1132 // allocates and triggers GC. The real limit is set at after
1133 // PostGarbageCollectionProcessing. 1133 // PostGarbageCollectionProcessing.
1134 old_generation_allocation_limit_ = 1134 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1135 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1136 old_gen_exhausted_ = false; 1135 old_gen_exhausted_ = false;
1137 old_generation_size_configured_ = true; 1136 old_generation_size_configured_ = true;
1138 } else { 1137 } else {
1139 Scavenge(); 1138 Scavenge();
1140 } 1139 }
1141 1140
1142 UpdateSurvivalStatistics(start_new_space_size); 1141 UpdateSurvivalStatistics(start_new_space_size);
1143 ConfigureInitialOldGenerationSize(); 1142 ConfigureInitialOldGenerationSize();
1144 1143
1145 isolate_->counters()->objs_since_last_young()->Set(0); 1144 isolate_->counters()->objs_since_last_young()->Set(0);
(...skipping 13 matching lines...) Expand all
1159 1158
1160 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); 1159 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1161 1160
1162 // Update relocatables. 1161 // Update relocatables.
1163 Relocatable::PostGarbageCollectionProcessing(isolate_); 1162 Relocatable::PostGarbageCollectionProcessing(isolate_);
1164 1163
1165 if (collector == MARK_COMPACTOR) { 1164 if (collector == MARK_COMPACTOR) {
1166 // Register the amount of external allocated memory. 1165 // Register the amount of external allocated memory.
1167 amount_of_external_allocated_memory_at_last_global_gc_ = 1166 amount_of_external_allocated_memory_at_last_global_gc_ =
1168 amount_of_external_allocated_memory_; 1167 amount_of_external_allocated_memory_;
1169 old_generation_allocation_limit_ = OldGenerationAllocationLimit( 1168 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1170 PromotedSpaceSizeOfObjects(), freed_global_handles); 1169 freed_global_handles);
1171 // We finished a marking cycle. We can uncommit the marking deque until 1170 // We finished a marking cycle. We can uncommit the marking deque until
1172 // we start marking again. 1171 // we start marking again.
1173 mark_compact_collector_.UncommitMarkingDeque(); 1172 mark_compact_collector_.UncommitMarkingDeque();
1174 } 1173 }
1175 1174
1176 { 1175 {
1177 GCCallbacksScope scope(this); 1176 GCCallbacksScope scope(this);
1178 if (scope.CheckReenter()) { 1177 if (scope.CheckReenter()) {
1179 AllowHeapAllocation allow_allocation; 1178 AllowHeapAllocation allow_allocation;
1180 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); 1179 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
(...skipping 3360 matching lines...) Expand 10 before | Expand all | Expand 10 after
4541 final_incremental_mark_compact_speed_in_bytes_per_ms))) { 4540 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4542 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); 4541 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4543 return true; 4542 return true;
4544 } 4543 }
4545 return false; 4544 return false;
4546 } 4545 }
4547 4546
4548 4547
4549 bool Heap::WorthActivatingIncrementalMarking() { 4548 bool Heap::WorthActivatingIncrementalMarking() {
4550 return incremental_marking()->IsStopped() && 4549 return incremental_marking()->IsStopped() &&
4551 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull(); 4550 incremental_marking()->ShouldActivate();
4552 } 4551 }
4553 4552
4554 4553
4555 static double MonotonicallyIncreasingTimeInMs() { 4554 static double MonotonicallyIncreasingTimeInMs() {
4556 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() * 4555 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4557 static_cast<double>(base::Time::kMillisecondsPerSecond); 4556 static_cast<double>(base::Time::kMillisecondsPerSecond);
4558 } 4557 }
4559 4558
4560 4559
4561 bool Heap::IdleNotification(int idle_time_in_ms) { 4560 bool Heap::IdleNotification(int idle_time_in_ms) {
4562 return IdleNotification( 4561 return IdleNotification(
4563 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() + 4562 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4564 (static_cast<double>(idle_time_in_ms) / 4563 (static_cast<double>(idle_time_in_ms) /
4565 static_cast<double>(base::Time::kMillisecondsPerSecond))); 4564 static_cast<double>(base::Time::kMillisecondsPerSecond)));
4566 } 4565 }
4567 4566
4568 4567
4569 bool Heap::IdleNotification(double deadline_in_seconds) { 4568 bool Heap::IdleNotification(double deadline_in_seconds) {
4570 CHECK(HasBeenSetUp()); // http://crbug.com/425035 4569 CHECK(HasBeenSetUp()); // http://crbug.com/425035
4571 double deadline_in_ms = 4570 double deadline_in_ms =
4572 deadline_in_seconds * 4571 deadline_in_seconds *
4573 static_cast<double>(base::Time::kMillisecondsPerSecond); 4572 static_cast<double>(base::Time::kMillisecondsPerSecond);
4574 HistogramTimerScope idle_notification_scope( 4573 HistogramTimerScope idle_notification_scope(
4575 isolate_->counters()->gc_idle_notification()); 4574 isolate_->counters()->gc_idle_notification());
4575 double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4576 4576
4577 GCIdleTimeHandler::HeapState heap_state; 4577 GCIdleTimeHandler::HeapState heap_state;
4578 heap_state.contexts_disposed = contexts_disposed_; 4578 heap_state.contexts_disposed = contexts_disposed_;
4579 heap_state.contexts_disposal_rate = 4579 heap_state.contexts_disposal_rate =
4580 tracer()->ContextDisposalRateInMilliseconds(); 4580 tracer()->ContextDisposalRateInMilliseconds();
4581 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects()); 4581 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4582 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped(); 4582 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4583 // TODO(ulan): Start incremental marking only for large heaps. 4583 // TODO(ulan): Start incremental marking only for large heaps.
4584 intptr_t limit = old_generation_allocation_limit_;
4585 if (static_cast<size_t>(idle_time_in_ms) >
4586 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4587 limit = idle_old_generation_allocation_limit_;
4588 }
4589
4584 heap_state.can_start_incremental_marking = 4590 heap_state.can_start_incremental_marking =
4585 incremental_marking()->ShouldActivate() && FLAG_incremental_marking && 4591 incremental_marking()->WorthActivating() &&
4592 NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking &&
4586 !mark_compact_collector()->sweeping_in_progress(); 4593 !mark_compact_collector()->sweeping_in_progress();
4587 heap_state.sweeping_in_progress = 4594 heap_state.sweeping_in_progress =
4588 mark_compact_collector()->sweeping_in_progress(); 4595 mark_compact_collector()->sweeping_in_progress();
4589 heap_state.sweeping_completed = 4596 heap_state.sweeping_completed =
4590 mark_compact_collector()->IsSweepingCompleted(); 4597 mark_compact_collector()->IsSweepingCompleted();
4591 heap_state.mark_compact_speed_in_bytes_per_ms = 4598 heap_state.mark_compact_speed_in_bytes_per_ms =
4592 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond()); 4599 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4593 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>( 4600 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4594 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond()); 4601 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4595 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms = 4602 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
4596 static_cast<size_t>( 4603 static_cast<size_t>(
4597 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()); 4604 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
4598 heap_state.scavenge_speed_in_bytes_per_ms = 4605 heap_state.scavenge_speed_in_bytes_per_ms =
4599 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond()); 4606 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4600 heap_state.used_new_space_size = new_space_.Size(); 4607 heap_state.used_new_space_size = new_space_.Size();
4601 heap_state.new_space_capacity = new_space_.Capacity(); 4608 heap_state.new_space_capacity = new_space_.Capacity();
4602 heap_state.new_space_allocation_throughput_in_bytes_per_ms = 4609 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4603 static_cast<size_t>( 4610 static_cast<size_t>(
4604 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond()); 4611 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4605 4612
4606 double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4607 GCIdleTimeAction action = 4613 GCIdleTimeAction action =
4608 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state); 4614 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4609 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample( 4615 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4610 static_cast<int>(idle_time_in_ms)); 4616 static_cast<int>(idle_time_in_ms));
4611 4617
4612 bool result = false; 4618 bool result = false;
4613 switch (action.type) { 4619 switch (action.type) {
4614 case DONE: 4620 case DONE:
4615 result = true; 4621 result = true;
4616 break; 4622 break;
(...skipping 554 matching lines...) Expand 10 before | Expand all | Expand 10 after
5171 5177
5172 int64_t Heap::PromotedExternalMemorySize() { 5178 int64_t Heap::PromotedExternalMemorySize() {
5173 if (amount_of_external_allocated_memory_ <= 5179 if (amount_of_external_allocated_memory_ <=
5174 amount_of_external_allocated_memory_at_last_global_gc_) 5180 amount_of_external_allocated_memory_at_last_global_gc_)
5175 return 0; 5181 return 0;
5176 return amount_of_external_allocated_memory_ - 5182 return amount_of_external_allocated_memory_ -
5177 amount_of_external_allocated_memory_at_last_global_gc_; 5183 amount_of_external_allocated_memory_at_last_global_gc_;
5178 } 5184 }
5179 5185
5180 5186
5181 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size, 5187 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5182 int freed_global_handles) { 5188 intptr_t old_gen_size) {
5189 CHECK(factor > 1.0);
5190 CHECK(old_gen_size > 0);
5191 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5192 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5193 limit += new_space_.Capacity();
5194 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5195 return Min(limit, halfway_to_the_max);
5196 }
5197
5198
5199 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5200 int freed_global_handles) {
5183 const int kMaxHandles = 1000; 5201 const int kMaxHandles = 1000;
5184 const int kMinHandles = 100; 5202 const int kMinHandles = 100;
5185 double min_factor = 1.1; 5203 const double min_factor = 1.1;
5186 double max_factor = 4; 5204 double max_factor = 4;
5205 const double idle_max_factor = 1.5;
5187 // We set the old generation growing factor to 2 to grow the heap slower on 5206 // We set the old generation growing factor to 2 to grow the heap slower on
5188 // memory-constrained devices. 5207 // memory-constrained devices.
5189 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { 5208 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5190 max_factor = 2; 5209 max_factor = 2;
5191 } 5210 }
5211
5192 // If there are many freed global handles, then the next full GC will 5212 // If there are many freed global handles, then the next full GC will
5193 // likely collect a lot of garbage. Choose the heap growing factor 5213 // likely collect a lot of garbage. Choose the heap growing factor
5194 // depending on freed global handles. 5214 // depending on freed global handles.
5195 // TODO(ulan, hpayer): Take into account mutator utilization. 5215 // TODO(ulan, hpayer): Take into account mutator utilization.
5216 // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
5217 // Look into that.
5196 double factor; 5218 double factor;
5197 if (freed_global_handles <= kMinHandles) { 5219 if (freed_global_handles <= kMinHandles) {
5198 factor = max_factor; 5220 factor = max_factor;
5199 } else if (freed_global_handles >= kMaxHandles) { 5221 } else if (freed_global_handles >= kMaxHandles) {
5200 factor = min_factor; 5222 factor = min_factor;
5201 } else { 5223 } else {
5202 // Compute factor using linear interpolation between points 5224 // Compute factor using linear interpolation between points
5203 // (kMinHandles, max_factor) and (kMaxHandles, min_factor). 5225 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5204 factor = max_factor - 5226 factor = max_factor -
5205 (freed_global_handles - kMinHandles) * (max_factor - min_factor) / 5227 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5206 (kMaxHandles - kMinHandles); 5228 (kMaxHandles - kMinHandles);
5207 } 5229 }
5208 5230
5209 if (FLAG_stress_compaction || 5231 if (FLAG_stress_compaction ||
5210 mark_compact_collector()->reduce_memory_footprint_) { 5232 mark_compact_collector()->reduce_memory_footprint_) {
5211 factor = min_factor; 5233 factor = min_factor;
5212 } 5234 }
5213 5235
5214 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); 5236 old_generation_allocation_limit_ =
5215 limit = Max(limit, kMinimumOldGenerationAllocationLimit); 5237 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5216 limit += new_space_.Capacity(); 5238 idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5217 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; 5239 Min(factor, idle_max_factor), old_gen_size);
5218 return Min(limit, halfway_to_the_max);
5219 } 5240 }
5220 5241
5221 5242
5222 void Heap::EnableInlineAllocation() { 5243 void Heap::EnableInlineAllocation() {
5223 if (!inline_allocation_disabled_) return; 5244 if (!inline_allocation_disabled_) return;
5224 inline_allocation_disabled_ = false; 5245 inline_allocation_disabled_ = false;
5225 5246
5226 // Update inline allocation limit for new space. 5247 // Update inline allocation limit for new space.
5227 new_space()->UpdateInlineAllocationLimit(0); 5248 new_space()->UpdateInlineAllocationLimit(0);
5228 } 5249 }
(...skipping 1125 matching lines...) Expand 10 before | Expand all | Expand 10 after
6354 static_cast<int>(object_sizes_last_time_[index])); 6375 static_cast<int>(object_sizes_last_time_[index]));
6355 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) 6376 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6356 #undef ADJUST_LAST_TIME_OBJECT_COUNT 6377 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6357 6378
6358 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); 6379 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6359 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); 6380 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6360 ClearObjectStats(); 6381 ClearObjectStats();
6361 } 6382 }
6362 } 6383 }
6363 } // namespace v8::internal 6384 } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698