| Index: src/heap.cc
|
| diff --git a/src/heap.cc b/src/heap.cc
|
| index db13637c9aa9222c631d461ad227f45f727d43fe..064ae39825ce8cfc5d87f9a23be958f319b79bd5 100644
|
| --- a/src/heap.cc
|
| +++ b/src/heap.cc
|
| @@ -61,7 +61,6 @@ Heap::Heap()
|
| // Will be 4 * reserved_semispace_size_ to ensure that young
|
| // generation can be aligned to its size.
|
| maximum_committed_(0),
|
| - old_space_growing_factor_(4),
|
| survived_since_last_expansion_(0),
|
| sweep_generation_(0),
|
| always_allocate_scope_depth_(0),
|
| @@ -90,7 +89,6 @@ Heap::Heap()
|
| allocation_timeout_(0),
|
| #endif // DEBUG
|
| old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
|
| - size_of_old_gen_at_last_old_space_gc_(0),
|
| old_gen_exhausted_(false),
|
| inline_allocation_disabled_(false),
|
| store_buffer_rebuilder_(store_buffer()),
|
| @@ -1056,7 +1054,7 @@ bool Heap::PerformGarbageCollection(
|
| GarbageCollector collector,
|
| GCTracer* tracer,
|
| const v8::GCCallbackFlags gc_callback_flags) {
|
| - bool next_gc_likely_to_collect_more = false;
|
| + int freed_global_handles = 0;
|
|
|
| if (collector != SCAVENGER) {
|
| PROFILE(isolate_, CodeMovingGCEvent());
|
| @@ -1096,12 +1094,11 @@ bool Heap::PerformGarbageCollection(
|
| // Perform mark-sweep with optional compaction.
|
| MarkCompact(tracer);
|
| sweep_generation_++;
|
| -
|
| - size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
|
| -
|
| + // Temporarily set the limit for the case when PostGarbageCollectionProcessing
|
| + // allocates and triggers GC. The real limit is set after
|
| + // PostGarbageCollectionProcessing.
|
| old_generation_allocation_limit_ =
|
| - OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
|
| -
|
| + OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
|
| old_gen_exhausted_ = false;
|
| } else {
|
| tracer_ = tracer;
|
| @@ -1120,7 +1117,7 @@ bool Heap::PerformGarbageCollection(
|
| gc_post_processing_depth_++;
|
| { AllowHeapAllocation allow_allocation;
|
| GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
|
| - next_gc_likely_to_collect_more =
|
| + freed_global_handles =
|
| isolate_->global_handles()->PostGarbageCollectionProcessing(
|
| collector, tracer);
|
| }
|
| @@ -1135,6 +1132,9 @@ bool Heap::PerformGarbageCollection(
|
| // Register the amount of external allocated memory.
|
| amount_of_external_allocated_memory_at_last_global_gc_ =
|
| amount_of_external_allocated_memory_;
|
| + old_generation_allocation_limit_ =
|
| + OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
|
| + freed_global_handles);
|
| }
|
|
|
| { GCCallbacksScope scope(this);
|
| @@ -1153,7 +1153,7 @@ bool Heap::PerformGarbageCollection(
|
| }
|
| #endif
|
|
|
| - return next_gc_likely_to_collect_more;
|
| + return freed_global_handles > 0;
|
| }
|
|
|
|
|
| @@ -4989,12 +4989,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
|
|
|
| code_range_size_ = code_range_size * MB;
|
|
|
| - // We set the old generation growing factor to 2 to grow the heap slower on
|
| - // memory-constrained devices.
|
| - if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
|
| - old_space_growing_factor_ = 2;
|
| - }
|
| -
|
| configured_ = true;
|
| return true;
|
| }
|
| @@ -5063,6 +5057,47 @@ int64_t Heap::PromotedExternalMemorySize() {
|
| }
|
|
|
|
|
| +intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
|
| + int freed_global_handles) {
|
| + const int kMaxHandles = 1000;
|
| + const int kMinHandles = 100;
|
| + double min_factor = 1.1;
|
| + double max_factor = 4;
|
| + // We cap the old generation growing factor at 2 to grow the heap slower on
|
| + // memory-constrained devices.
|
| + if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
|
| + max_factor = 2;
|
| + }
|
| + // If there are many freed global handles, then the next full GC will
|
| + // likely collect a lot of garbage. Choose the heap growing factor
|
| + // depending on freed global handles.
|
| + // TODO(ulan, hpayer): Take into account mutator utilization.
|
| + double factor;
|
| + if (freed_global_handles <= kMinHandles) {
|
| + factor = max_factor;
|
| + } else if (freed_global_handles >= kMaxHandles) {
|
| + factor = min_factor;
|
| + } else {
|
| + // Compute factor using linear interpolation between points
|
| + // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
|
| + factor = max_factor -
|
| + (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
|
| + (kMaxHandles - kMinHandles);
|
| + }
|
| +
|
| + if (FLAG_stress_compaction ||
|
| + mark_compact_collector()->reduce_memory_footprint_) {
|
| + factor = min_factor;
|
| + }
|
| +
|
| + intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
|
| + limit = Max(limit, kMinimumOldGenerationAllocationLimit);
|
| + limit += new_space_.Capacity();
|
| + intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
|
| + return Min(limit, halfway_to_the_max);
|
| +}
|
| +
|
| +
|
| void Heap::EnableInlineAllocation() {
|
| if (!inline_allocation_disabled_) return;
|
| inline_allocation_disabled_ = false;
|
|
|