Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/heap/heap.cc

Issue 2407153002: [heap] Use RAIL mode for initial heap sizing (Closed)
Patch Set: fix test Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking-job.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/context-slot-cache.h" 9 #include "src/ast/context-slot-cache.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
73 : external_memory_(0), 73 : external_memory_(0),
74 external_memory_limit_(kExternalAllocationSoftLimit), 74 external_memory_limit_(kExternalAllocationSoftLimit),
75 external_memory_at_last_mark_compact_(0), 75 external_memory_at_last_mark_compact_(0),
76 isolate_(nullptr), 76 isolate_(nullptr),
77 code_range_size_(0), 77 code_range_size_(0),
78 // semispace_size_ should be a power of 2 and old_generation_size_ should 78 // semispace_size_ should be a power of 2 and old_generation_size_ should
79 // be a multiple of Page::kPageSize. 79 // be a multiple of Page::kPageSize.
80 max_semi_space_size_(8 * (kPointerSize / 4) * MB), 80 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
81 initial_semispace_size_(MB), 81 initial_semispace_size_(MB),
82 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), 82 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
83 initial_old_generation_size_(max_old_generation_size_ /
84 kInitalOldGenerationLimitFactor),
85 old_generation_size_configured_(false),
86 max_executable_size_(256ul * (kPointerSize / 4) * MB), 83 max_executable_size_(256ul * (kPointerSize / 4) * MB),
87 // Variables set based on semispace_size_ and old_generation_size_ in 84 // Variables set based on semispace_size_ and old_generation_size_ in
88 // ConfigureHeap. 85 // ConfigureHeap.
89 // Will be 4 * reserved_semispace_size_ to ensure that young 86 // Will be 4 * reserved_semispace_size_ to ensure that young
90 // generation can be aligned to its size. 87 // generation can be aligned to its size.
91 maximum_committed_(0), 88 maximum_committed_(0),
92 survived_since_last_expansion_(0), 89 survived_since_last_expansion_(0),
93 survived_last_scavenge_(0), 90 survived_last_scavenge_(0),
94 always_allocate_scope_count_(0), 91 always_allocate_scope_count_(0),
95 memory_pressure_level_(MemoryPressureLevel::kNone), 92 memory_pressure_level_(MemoryPressureLevel::kNone),
96 contexts_disposed_(0), 93 contexts_disposed_(0),
97 number_of_disposed_maps_(0), 94 number_of_disposed_maps_(0),
98 global_ic_age_(0), 95 global_ic_age_(0),
99 new_space_(nullptr), 96 new_space_(nullptr),
100 old_space_(NULL), 97 old_space_(NULL),
101 code_space_(NULL), 98 code_space_(NULL),
102 map_space_(NULL), 99 map_space_(NULL),
103 lo_space_(NULL), 100 lo_space_(NULL),
104 gc_state_(NOT_IN_GC), 101 gc_state_(NOT_IN_GC),
105 gc_post_processing_depth_(0), 102 gc_post_processing_depth_(0),
106 allocations_count_(0), 103 allocations_count_(0),
107 raw_allocations_hash_(0), 104 raw_allocations_hash_(0),
108 ms_count_(0), 105 ms_count_(0),
109 gc_count_(0), 106 gc_count_(0),
110 remembered_unmapped_pages_index_(0), 107 remembered_unmapped_pages_index_(0),
111 #ifdef DEBUG 108 #ifdef DEBUG
112 allocation_timeout_(0), 109 allocation_timeout_(0),
113 #endif // DEBUG 110 #endif // DEBUG
114 old_generation_allocation_limit_(initial_old_generation_size_), 111 old_generation_allocation_limit_(0),
115 inline_allocation_disabled_(false), 112 inline_allocation_disabled_(false),
116 total_regexp_code_generated_(0), 113 total_regexp_code_generated_(0),
117 tracer_(nullptr), 114 tracer_(nullptr),
118 promoted_objects_size_(0), 115 promoted_objects_size_(0),
119 promotion_ratio_(0), 116 promotion_ratio_(0),
120 semi_space_copied_object_size_(0), 117 semi_space_copied_object_size_(0),
121 previous_semi_space_copied_object_size_(0), 118 previous_semi_space_copied_object_size_(0),
122 semi_space_copied_rate_(0), 119 semi_space_copied_rate_(0),
123 nodes_died_in_new_space_(0), 120 nodes_died_in_new_space_(0),
124 nodes_copied_in_new_space_(0), 121 nodes_copied_in_new_space_(0),
(...skipping 917 matching lines...) Expand 10 before | Expand all | Expand 10 after
1042 kNoGCCallbackFlags); 1039 kNoGCCallbackFlags);
1043 } 1040 }
1044 1041
1045 return next_gc_likely_to_collect_more; 1042 return next_gc_likely_to_collect_more;
1046 } 1043 }
1047 1044
1048 1045
1049 int Heap::NotifyContextDisposed(bool dependant_context) { 1046 int Heap::NotifyContextDisposed(bool dependant_context) {
1050 if (!dependant_context) { 1047 if (!dependant_context) {
1051 tracer()->ResetSurvivalEvents(); 1048 tracer()->ResetSurvivalEvents();
1052 old_generation_size_configured_ = false;
1053 MemoryReducer::Event event; 1049 MemoryReducer::Event event;
1054 event.type = MemoryReducer::kPossibleGarbage; 1050 event.type = MemoryReducer::kPossibleGarbage;
1055 event.time_ms = MonotonicallyIncreasingTimeInMs(); 1051 event.time_ms = MonotonicallyIncreasingTimeInMs();
1056 memory_reducer_->NotifyPossibleGarbage(event); 1052 memory_reducer_->NotifyPossibleGarbage(event);
1057 } 1053 }
1058 if (isolate()->concurrent_recompilation_enabled()) { 1054 if (isolate()->concurrent_recompilation_enabled()) {
1059 // Flush the queued recompilation tasks. 1055 // Flush the queued recompilation tasks.
1060 isolate()->optimizing_compile_dispatcher()->Flush(); 1056 isolate()->optimizing_compile_dispatcher()->Flush();
1061 } 1057 }
1062 AgeInlineCaches(); 1058 AgeInlineCaches();
(...skipping 243 matching lines...) Expand 10 before | Expand all | Expand 10 after
1306 int start_new_space_size = static_cast<int>(Heap::new_space()->Size()); 1302 int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
1307 1303
1308 { 1304 {
1309 Heap::PretenuringScope pretenuring_scope(this); 1305 Heap::PretenuringScope pretenuring_scope(this);
1310 1306
1311 switch (collector) { 1307 switch (collector) {
1312 case MARK_COMPACTOR: 1308 case MARK_COMPACTOR:
1313 UpdateOldGenerationAllocationCounter(); 1309 UpdateOldGenerationAllocationCounter();
1314 // Perform mark-sweep with optional compaction. 1310 // Perform mark-sweep with optional compaction.
1315 MarkCompact(); 1311 MarkCompact();
1316 old_generation_size_configured_ = true;
1317 // This should be updated before PostGarbageCollectionProcessing, which 1312 // This should be updated before PostGarbageCollectionProcessing, which
1318 // can cause another GC. Take into account the objects promoted during 1313 // can cause another GC. Take into account the objects promoted during
1319 // GC. 1314 // GC.
1320 old_generation_allocation_counter_at_last_gc_ += 1315 old_generation_allocation_counter_at_last_gc_ +=
1321 static_cast<size_t>(promoted_objects_size_); 1316 static_cast<size_t>(promoted_objects_size_);
1322 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); 1317 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1323 break; 1318 break;
1324 case MINOR_MARK_COMPACTOR: 1319 case MINOR_MARK_COMPACTOR:
1325 MinorMarkCompact(); 1320 MinorMarkCompact();
1326 break; 1321 break;
1327 case SCAVENGER: 1322 case SCAVENGER:
1328 Scavenge(); 1323 Scavenge();
1329 break; 1324 break;
1330 } 1325 }
1331 1326
1332 ProcessPretenuringFeedback(); 1327 ProcessPretenuringFeedback();
1333 } 1328 }
1334 1329
1335 UpdateSurvivalStatistics(start_new_space_size); 1330 UpdateSurvivalStatistics(start_new_space_size);
1336 ConfigureInitialOldGenerationSize();
1337 1331
1338 isolate_->counters()->objs_since_last_young()->Set(0); 1332 isolate_->counters()->objs_since_last_young()->Set(0);
1339 1333
1340 gc_post_processing_depth_++; 1334 gc_post_processing_depth_++;
1341 { 1335 {
1342 AllowHeapAllocation allow_allocation; 1336 AllowHeapAllocation allow_allocation;
1343 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES); 1337 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
1344 freed_global_handles = 1338 freed_global_handles =
1345 isolate_->global_handles()->PostGarbageCollectionProcessing( 1339 isolate_->global_handles()->PostGarbageCollectionProcessing(
1346 collector, gc_callback_flags); 1340 collector, gc_callback_flags);
1347 } 1341 }
1348 gc_post_processing_depth_--; 1342 gc_post_processing_depth_--;
1349 1343
1350 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); 1344 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1351 1345
1352 // Update relocatables. 1346 // Update relocatables.
1353 Relocatable::PostGarbageCollectionProcessing(isolate_); 1347 Relocatable::PostGarbageCollectionProcessing(isolate_);
1354 1348
1355 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); 1349 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1356 double mutator_speed = 1350 double mutator_speed =
1357 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond(); 1351 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1358 size_t old_gen_size = PromotedSpaceSizeOfObjects(); 1352 size_t old_gen_size = PromotedSpaceSizeOfObjects();
1359 if (collector == MARK_COMPACTOR) { 1353 if (collector == MARK_COMPACTOR) {
1360 // Register the amount of external allocated memory. 1354 // Register the amount of external allocated memory.
1361 external_memory_at_last_mark_compact_ = external_memory_; 1355 external_memory_at_last_mark_compact_ = external_memory_;
1362 external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit; 1356 external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1363 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); 1357 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1364 } else if (HasLowYoungGenerationAllocationRate() && 1358 } else if (HasLowYoungGenerationAllocationRate()) {
1365 old_generation_size_configured_) {
1366 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); 1359 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1367 } 1360 }
1368 1361
1369 { 1362 {
1370 GCCallbacksScope scope(this); 1363 GCCallbacksScope scope(this);
1371 if (scope.CheckReenter()) { 1364 if (scope.CheckReenter()) {
1372 AllowHeapAllocation allow_allocation; 1365 AllowHeapAllocation allow_allocation;
1373 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE); 1366 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
1374 VMState<EXTERNAL> state(isolate_); 1367 VMState<EXTERNAL> state(isolate_);
1375 HandleScope handle_scope(isolate_); 1368 HandleScope handle_scope(isolate_);
(...skipping 609 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Records |buffer| with the ArrayBufferTracker for this heap. Delegates
// directly; all bookkeeping lives in ArrayBufferTracker::RegisterNew.
void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::RegisterNew(this, buffer);
}
1988 1981
1989 1982
// Removes |buffer| from the ArrayBufferTracker for this heap. Delegates
// directly; all bookkeeping lives in ArrayBufferTracker::Unregister.
void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::Unregister(this, buffer);
}
1993 1986
1994 1987
1995 void Heap::ConfigureInitialOldGenerationSize() {
1996 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
1997 old_generation_allocation_limit_ =
1998 Max(MinimumAllocationLimitGrowingStep(),
1999 static_cast<size_t>(
2000 static_cast<double>(old_generation_allocation_limit_) *
2001 (tracer()->AverageSurvivalRatio() / 100)));
2002 }
2003 }
2004
2005
2006 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, 1988 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2007 int instance_size) { 1989 int instance_size) {
2008 Object* result = nullptr; 1990 Object* result = nullptr;
2009 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE); 1991 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2010 if (!allocation.To(&result)) return allocation; 1992 if (!allocation.To(&result)) return allocation;
2011 1993
2012 // Map::cast cannot be used due to uninitialized map field. 1994 // Map::cast cannot be used due to uninitialized map field.
2013 reinterpret_cast<Map*>(result)->set_map( 1995 reinterpret_cast<Map*>(result)->set_map(
2014 reinterpret_cast<Map*>(root(kMetaMapRootIndex))); 1996 reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
2015 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); 1997 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
(...skipping 2234 matching lines...) Expand 10 before | Expand all | Expand 10 after
4250 bool result = false; 4232 bool result = false;
4251 switch (action.type) { 4233 switch (action.type) {
4252 case DONE: 4234 case DONE:
4253 result = true; 4235 result = true;
4254 break; 4236 break;
4255 case DO_INCREMENTAL_STEP: { 4237 case DO_INCREMENTAL_STEP: {
4256 const double remaining_idle_time_in_ms = 4238 const double remaining_idle_time_in_ms =
4257 incremental_marking()->AdvanceIncrementalMarking( 4239 incremental_marking()->AdvanceIncrementalMarking(
4258 deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD, 4240 deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4259 IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask); 4241 IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
4260 if (remaining_idle_time_in_ms > 0.0) { 4242 if (remaining_idle_time_in_ms > 0.0 &&
4243 incremental_marking()->IsMarking()) {
4261 TryFinalizeIdleIncrementalMarking( 4244 TryFinalizeIdleIncrementalMarking(
4262 remaining_idle_time_in_ms, 4245 remaining_idle_time_in_ms,
4263 GarbageCollectionReason::kFinalizeMarkingViaTask); 4246 GarbageCollectionReason::kFinalizeMarkingViaTask);
4264 } 4247 }
4265 result = incremental_marking()->IsStopped(); 4248 result = incremental_marking()->IsStopped();
4266 break; 4249 break;
4267 } 4250 }
4268 case DO_FULL_GC: { 4251 case DO_FULL_GC: {
4269 DCHECK(contexts_disposed_ > 0); 4252 DCHECK(contexts_disposed_ > 0);
4270 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4253 HistogramTimerScope scope(isolate_->counters()->gc_context());
(...skipping 771 matching lines...) Expand 10 before | Expand all | Expand 10 after
5042 max_old_generation_size_ = 5025 max_old_generation_size_ =
5043 Max(static_cast<size_t>(paged_space_count * Page::kPageSize), 5026 Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
5044 max_old_generation_size_); 5027 max_old_generation_size_);
5045 5028
5046 // The max executable size must be less than or equal to the max old 5029 // The max executable size must be less than or equal to the max old
5047 // generation size. 5030 // generation size.
5048 if (max_executable_size_ > max_old_generation_size_) { 5031 if (max_executable_size_ > max_old_generation_size_) {
5049 max_executable_size_ = max_old_generation_size_; 5032 max_executable_size_ = max_old_generation_size_;
5050 } 5033 }
5051 5034
5052 if (FLAG_initial_old_space_size > 0) {
5053 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5054 } else {
5055 initial_old_generation_size_ =
5056 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5057 }
5058 old_generation_allocation_limit_ = initial_old_generation_size_;
5059
5060 // We rely on being able to allocate new arrays in paged spaces. 5035 // We rely on being able to allocate new arrays in paged spaces.
5061 DCHECK(kMaxRegularHeapObjectSize >= 5036 DCHECK(kMaxRegularHeapObjectSize >=
5062 (JSArray::kSize + 5037 (JSArray::kSize +
5063 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) + 5038 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
5064 AllocationMemento::kSize)); 5039 AllocationMemento::kSize));
5065 5040
5066 code_range_size_ = code_range_size * MB; 5041 code_range_size_ = code_range_size * MB;
5067 5042
5068 configured_ = true; 5043 configured_ = true;
5069 return true; 5044 return true;
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
5287 "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS 5262 "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
5288 " KB, " 5263 " KB, "
5289 "new limit: %" PRIuS " KB (%.1f)\n", 5264 "new limit: %" PRIuS " KB (%.1f)\n",
5290 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB, 5265 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
5291 factor); 5266 factor);
5292 } 5267 }
5293 old_generation_allocation_limit_ = limit; 5268 old_generation_allocation_limit_ = limit;
5294 } 5269 }
5295 } 5270 }
5296 5271
5272 size_t Heap::OldGenerationSpaceAvailable() {
5273 if (old_generation_allocation_limit_ == 0) {
5274 // Lazy initialization of allocation limit.
5275 old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5276 kConservativeHeapGrowingFactor, PromotedSpaceSizeOfObjects());
5277 }
5278 if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
5279 return old_generation_allocation_limit_ -
5280 static_cast<size_t>(PromotedTotalSize());
5281 }
5282
5283 bool Heap::ShouldOptimizeForLoadTime() {
5284 return isolate()->rail_mode() == PERFORMANCE_LOAD &&
5285 PromotedTotalSize() <
5286 max_old_generation_size_ / kInitalOldGenerationLimitFactor &&
5287 MonotonicallyIncreasingTimeInMs() <
5288 isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
5289 }
5290
// This predicate is called when an old generation space cannot allocate from
// the free list and is about to add a new page. Returning false will cause a
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
  // Expansion is unconditionally allowed while allocation is forced
  // (always_allocate()) or while there is headroom under the limit.
  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
  // We reached the old generation allocation limit.

  if (ShouldOptimizeForMemoryUsage()) return false;

  // During a page load (RAIL load mode), prefer growing the heap over
  // triggering a major GC to keep load time low.
  if (ShouldOptimizeForLoadTime()) return true;

  if (incremental_marking()->IsStopped() &&
      IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
    // We cannot start incremental marking.
    return false;
  }
  return true;
}
5315 5311
5316 // This function returns either kNoLimit, kSoftLimit, or kHardLimit. 5312 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
5317 // The kNoLimit means that either incremental marking is disabled or it is too 5313 // The kNoLimit means that either incremental marking is disabled or it is too
(...skipping 10 matching lines...) Expand all
5328 if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) || 5324 if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
5329 HighMemoryPressure()) { 5325 HighMemoryPressure()) {
5330 // If there is high memory pressure or stress testing is enabled, then 5326 // If there is high memory pressure or stress testing is enabled, then
5331 // start marking immediately. 5327 // start marking immediately.
5332 return IncrementalMarkingLimit::kHardLimit; 5328 return IncrementalMarkingLimit::kHardLimit;
5333 } 5329 }
5334 size_t old_generation_space_available = OldGenerationSpaceAvailable(); 5330 size_t old_generation_space_available = OldGenerationSpaceAvailable();
5335 if (old_generation_space_available > new_space_->Capacity()) { 5331 if (old_generation_space_available > new_space_->Capacity()) {
5336 return IncrementalMarkingLimit::kNoLimit; 5332 return IncrementalMarkingLimit::kNoLimit;
5337 } 5333 }
5334
5335 if (ShouldOptimizeForLoadTime()) return IncrementalMarkingLimit::kNoLimit;
5336
5338 // We are close to the allocation limit. 5337 // We are close to the allocation limit.
5339 // Choose between the hard and the soft limits. 5338 // Choose between the hard and the soft limits.
5340 if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) { 5339 if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
5341 return IncrementalMarkingLimit::kHardLimit; 5340 return IncrementalMarkingLimit::kHardLimit;
5342 } 5341 }
5343 return IncrementalMarkingLimit::kSoftLimit; 5342 return IncrementalMarkingLimit::kSoftLimit;
5344 } 5343 }
5345 5344
5346 void Heap::EnableInlineAllocation() { 5345 void Heap::EnableInlineAllocation() {
5347 if (!inline_allocation_disabled_) return; 5346 if (!inline_allocation_disabled_) return;
(...skipping 1130 matching lines...) Expand 10 before | Expand all | Expand 10 after
6478 } 6477 }
6479 6478
6480 6479
// static
// Returns the static visitor id for |map|, as computed by
// StaticVisitorBase::GetVisitorId.
int Heap::GetStaticVisitorIdForMap(Map* map) {
  return StaticVisitorBase::GetVisitorId(map);
}
6485 6484
6486 } // namespace internal 6485 } // namespace internal
6487 } // namespace v8 6486 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/heap.h ('k') | src/heap/incremental-marking-job.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698