Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index ac92f07a829785c384ae3faeb4d33073b855d5a5..732f88ae2853fdfd66df08c57e15d76d0810af09 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -80,9 +80,6 @@ Heap::Heap()
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_old_generation_size_(max_old_generation_size_ /
-                                   kInitalOldGenerationLimitFactor),
-      old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
@@ -111,7 +108,7 @@ Heap::Heap()
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
-      old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_allocation_limit_(0),
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
@@ -1047,7 +1044,6 @@ bool Heap::CollectGarbage(GarbageCollector collector,
 int Heap::NotifyContextDisposed(bool dependant_context) {
   if (!dependant_context) {
     tracer()->ResetSurvivalEvents();
-    old_generation_size_configured_ = false;
     MemoryReducer::Event event;
     event.type = MemoryReducer::kPossibleGarbage;
     event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1311,7 +1307,6 @@ bool Heap::PerformGarbageCollection(
       UpdateOldGenerationAllocationCounter();
       // Perform mark-sweep with optional compaction.
       MarkCompact();
-      old_generation_size_configured_ = true;
       // This should be updated before PostGarbageCollectionProcessing, which
       // can cause another GC. Take into account the objects promoted during GC.
       old_generation_allocation_counter_at_last_gc_ +=
@@ -1325,7 +1320,6 @@ bool Heap::PerformGarbageCollection(
   }

   UpdateSurvivalStatistics(start_new_space_size);
-  ConfigureInitialOldGenerationSize();

   isolate_->counters()->objs_since_last_young()->Set(0);

@@ -1353,8 +1347,7 @@ bool Heap::PerformGarbageCollection(
     external_memory_at_last_mark_compact_ = external_memory_;
     external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
-  } else if (HasLowYoungGenerationAllocationRate() &&
-             old_generation_size_configured_) {
+  } else if (HasLowYoungGenerationAllocationRate()) {
     DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   }

@@ -2022,17 +2015,6 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
 }


-void Heap::ConfigureInitialOldGenerationSize() {
-  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    old_generation_allocation_limit_ =
-        Max(MinimumAllocationLimitGrowingStep(),
-            static_cast<intptr_t>(
-                static_cast<double>(old_generation_allocation_limit_) *
-                (tracer()->AverageSurvivalRatio() / 100)));
-  }
-}
-
-
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result = nullptr;
@@ -5082,14 +5064,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
     max_executable_size_ = max_old_generation_size_;
   }

-  if (FLAG_initial_old_space_size > 0) {
-    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
-  } else {
-    initial_old_generation_size_ =
-        max_old_generation_size_ / kInitalOldGenerationLimitFactor;
-  }
-  old_generation_allocation_limit_ = initial_old_generation_size_;
-
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
@@ -5329,6 +5303,23 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
   }
 }

+intptr_t Heap::OldGenerationSpaceAvailable() {
+  if (old_generation_allocation_limit_ == 0) {
+    // Lazy initialization of allocation limit.
+    old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
+        kConservativeHeapGrowingFactor, PromotedSpaceSizeOfObjects());
+  }
+  return old_generation_allocation_limit_ - PromotedTotalSize();
+}
+
+bool Heap::ShouldOptimizeForLoadTime() {
+  return isolate()->rail_mode() == PERFORMANCE_LOAD &&
+         PromotedTotalSize() <
+             max_old_generation_size_ / kInitalOldGenerationLimitFactor &&
+         MonotonicallyIncreasingTimeInMs() <
+             isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
+}
+
 // This predicate is called when an old generation space cannot allocated from
 // the free list and is about to add a new page. Returning false will cause a
 // major GC. It happens when the old generation allocation limit is reached and
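The two helpers above carry the core of the change: instead of being computed eagerly in the constructor and again in ConfigureHeap, the old generation allocation limit now starts at 0 and is derived on first use inside OldGenerationSpaceAvailable(), replacing the deleted ConfigureInitialOldGenerationSize() path. A minimal standalone sketch of this lazy-limit pattern follows; LimitHolder, CalculateLimit, and the value of kConservativeFactor are illustrative placeholders, not V8 API (the real kConservativeHeapGrowingFactor is defined outside this patch).

#include <cstdint>
#include <iostream>

// Placeholder class; in the patch this logic lives on v8::internal::Heap.
class LimitHolder {
 public:
  // Mirrors OldGenerationSpaceAvailable(): a limit of 0 means "not yet
  // configured", so the first caller computes it from the live heap size.
  int64_t Available(int64_t promoted_size, int64_t promoted_total) {
    if (limit_ == 0) {
      // Lazy initialization: grow conservatively from the current live size.
      limit_ = CalculateLimit(promoted_size);
    }
    return limit_ - promoted_total;
  }

 private:
  // Assumed stand-in for kConservativeHeapGrowingFactor; the actual value is
  // not visible in this patch.
  static constexpr double kConservativeFactor = 1.3;

  static int64_t CalculateLimit(int64_t promoted_size) {
    return static_cast<int64_t>(promoted_size * kConservativeFactor);
  }

  int64_t limit_ = 0;  // 0 == unconfigured sentinel, as in the patch
};

int main() {
  LimitHolder holder;
  // The first call configures the limit; later calls only subtract.
  std::cout << holder.Available(100 * 1024 * 1024, 40 * 1024 * 1024) << "\n";
}

Using 0 as the sentinel is what makes the constructor change above (initializing old_generation_allocation_limit_ to 0) safe: any path that reads the limit goes through the lazy check first.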
@@ -5340,6 +5331,8 @@ bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {

   if (ShouldOptimizeForMemoryUsage()) return false;

+  if (ShouldOptimizeForLoadTime()) return true;
+
   if (incremental_marking()->IsStopped() &&
       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
     // We cannot start incremental marking.
@@ -5369,6 +5362,9 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
+
+  if (ShouldOptimizeForLoadTime()) return IncrementalMarkingLimit::kNoLimit;
+
   // We are close to the allocation limit.
   // Choose between the hard and the soft limits.
   if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
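Both call sites treat ShouldOptimizeForLoadTime() the same way: while a page load is in progress the heap prefers growing over collecting, so allocation failure expands the old generation and the incremental-marking limit is reported as not reached. Note that ShouldOptimizeForMemoryUsage() is checked first in ShouldExpandOldGenerationOnAllocationFailure(), so memory pressure still wins over load-time expansion. A hedged sketch of the predicate's shape; LoadPhaseInfo and the constant values are illustrative, since kMaxLoadTimeMs and the RAIL-mode plumbing are defined outside this patch.

#include <cstdint>

// Illustrative stand-in for the isolate state the predicate reads
// (rail_mode() and LoadStartTimeMs() in the actual patch).
struct LoadPhaseInfo {
  bool in_load_phase = false;   // analogous to rail_mode() == PERFORMANCE_LOAD
  double load_start_ms = 0.0;   // analogous to isolate()->LoadStartTimeMs()
};

// Assumed values; the real constants live elsewhere in V8.
constexpr double kMaxLoadTimeMs = 3000;
constexpr int kInitialLimitFactor = 2;

// True only while all three conditions hold: a load phase is active, the
// promoted size is still small relative to the maximum old generation, and
// the load has not exceeded its time budget.
bool ShouldOptimizeForLoadTime(const LoadPhaseInfo& info, double now_ms,
                               int64_t promoted_total,
                               int64_t max_old_generation_size) {
  return info.in_load_phase &&
         promoted_total < max_old_generation_size / kInitialLimitFactor &&
         now_ms < info.load_start_ms + kMaxLoadTimeMs;
}

int main() {
  LoadPhaseInfo info{true, 0.0};
  // 50 MB promoted out of a 256 MB old generation, 1 s into the load.
  return ShouldOptimizeForLoadTime(info, 1000.0, 50 << 20, 256ll << 20) ? 0 : 1;
}

The size and time bounds cap how long GC can be deferred, so a pathological load cannot grow the heap without limit.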
|