
Side by Side Diff: src/heap/heap.cc

Issue 2490523003: [heap] Use size_t for heap and space counters. (Closed)
Patch Set: more fixes Created 4 years, 1 month ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/context-slot-cache.h" 9 #include "src/ast/context-slot-cache.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 161 matching lines...)
172 set_native_contexts_list(NULL); 172 set_native_contexts_list(NULL);
173 set_allocation_sites_list(Smi::kZero); 173 set_allocation_sites_list(Smi::kZero);
174 set_encountered_weak_collections(Smi::kZero); 174 set_encountered_weak_collections(Smi::kZero);
175 set_encountered_weak_cells(Smi::kZero); 175 set_encountered_weak_cells(Smi::kZero);
176 set_encountered_transition_arrays(Smi::kZero); 176 set_encountered_transition_arrays(Smi::kZero);
 177 // Put a dummy entry in the remembered pages so we can find the list in the 177 // Put a dummy entry in the remembered pages so we can find the list in the
178 // minidump even if there are no real unmapped pages. 178 // minidump even if there are no real unmapped pages.
179 RememberUnmappedPage(NULL, false); 179 RememberUnmappedPage(NULL, false);
180 } 180 }
181 181
182 182 size_t Heap::Capacity() {
183 intptr_t Heap::Capacity() {
184 if (!HasBeenSetUp()) return 0; 183 if (!HasBeenSetUp()) return 0;
185 184
186 return new_space_->Capacity() + OldGenerationCapacity(); 185 return new_space_->Capacity() + OldGenerationCapacity();
187 } 186 }
188 187
189 intptr_t Heap::OldGenerationCapacity() { 188 size_t Heap::OldGenerationCapacity() {
190 if (!HasBeenSetUp()) return 0; 189 if (!HasBeenSetUp()) return 0;
191 190
192 return old_space_->Capacity() + code_space_->Capacity() + 191 return old_space_->Capacity() + code_space_->Capacity() +
193 map_space_->Capacity() + lo_space_->SizeOfObjects(); 192 map_space_->Capacity() + lo_space_->SizeOfObjects();
194 } 193 }
195 194
196 size_t Heap::CommittedOldGenerationMemory() { 195 size_t Heap::CommittedOldGenerationMemory() {
197 if (!HasBeenSetUp()) return 0; 196 if (!HasBeenSetUp()) return 0;
198 197
199 return old_space_->CommittedMemory() + code_space_->CommittedMemory() + 198 return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
(...skipping 26 matching lines...)
226 225
227 void Heap::UpdateMaximumCommitted() { 226 void Heap::UpdateMaximumCommitted() {
228 if (!HasBeenSetUp()) return; 227 if (!HasBeenSetUp()) return;
229 228
230 const size_t current_committed_memory = CommittedMemory(); 229 const size_t current_committed_memory = CommittedMemory();
231 if (current_committed_memory > maximum_committed_) { 230 if (current_committed_memory > maximum_committed_) {
232 maximum_committed_ = current_committed_memory; 231 maximum_committed_ = current_committed_memory;
233 } 232 }
234 } 233 }
235 234
236 235 size_t Heap::Available() {
237 intptr_t Heap::Available() {
238 if (!HasBeenSetUp()) return 0; 236 if (!HasBeenSetUp()) return 0;
239 237
240 intptr_t total = 0; 238 size_t total = 0;
241 AllSpaces spaces(this); 239 AllSpaces spaces(this);
242 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { 240 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
243 total += space->Available(); 241 total += space->Available();
244 } 242 }
245 return total; 243 return total;
246 } 244 }
247 245
248 246
249 bool Heap::HasBeenSetUp() { 247 bool Heap::HasBeenSetUp() {
250 return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL && 248 return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
(...skipping 17 matching lines...)
268 266
269 // Is there enough space left in OLD to guarantee that a scavenge can 267 // Is there enough space left in OLD to guarantee that a scavenge can
270 // succeed? 268 // succeed?
271 // 269 //
272 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available 270 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
273 // for object promotion. It counts only the bytes that the memory 271 // for object promotion. It counts only the bytes that the memory
274 // allocator has not yet allocated from the OS and assigned to any space, 272 // allocator has not yet allocated from the OS and assigned to any space,
275 // and does not count available bytes already in the old space or code 273 // and does not count available bytes already in the old space or code
276 // space. Undercounting is safe---we may get an unrequested full GC when 274 // space. Undercounting is safe---we may get an unrequested full GC when
277 // a scavenge would have succeeded. 275 // a scavenge would have succeeded.
278 if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <= 276 if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
279 new_space_->Size()) {
280 isolate_->counters() 277 isolate_->counters()
281 ->gc_compactor_caused_by_oldspace_exhaustion() 278 ->gc_compactor_caused_by_oldspace_exhaustion()
282 ->Increment(); 279 ->Increment();
283 *reason = "scavenge might not succeed"; 280 *reason = "scavenge might not succeed";
284 return MARK_COMPACTOR; 281 return MARK_COMPACTOR;
285 } 282 }
286 283
287 // Default 284 // Default
288 *reason = NULL; 285 *reason = NULL;
289 return YoungGenerationCollector(); 286 return YoungGenerationCollector();
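
A side note on the rewritten condition above: once MaxAvailable() and Size() both return size_t, the comparison is homogeneous and the old static_cast<intptr_t> guard is simply dropped. A minimal sketch (function name hypothetical):

    #include <cstddef>

    // Comparing two size_t values directly; no cast needed, and no
    // -Wsign-compare warning, once both counters share the unsigned type.
    bool ScavengeMightFail(size_t max_available, size_t new_space_size) {
      return max_available <= new_space_size;
    }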
(...skipping 19 matching lines...)
309 new_space_->CollectStatistics(); 306 new_space_->CollectStatistics();
310 new_space_->ReportStatistics(); 307 new_space_->ReportStatistics();
311 new_space_->ClearHistograms(); 308 new_space_->ClearHistograms();
312 } 309 }
313 #endif // DEBUG 310 #endif // DEBUG
314 } 311 }
315 312
316 313
317 void Heap::PrintShortHeapStatistics() { 314 void Heap::PrintShortHeapStatistics() {
318 if (!FLAG_trace_gc_verbose) return; 315 if (!FLAG_trace_gc_verbose) return;
319 PrintIsolate(isolate_, 316 PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
320 "Memory allocator, used: %6zu KB," 317 " KB,"
321 " available: %6zu KB\n", 318 " available: %6" PRIuS " KB\n",
322 memory_allocator()->Size() / KB, 319 memory_allocator()->Size() / KB,
323 memory_allocator()->Available() / KB); 320 memory_allocator()->Available() / KB);
324 PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR 321 PrintIsolate(isolate_, "New space, used: %6" PRIuS
325 " KB" 322 " KB"
326 ", available: %6" V8PRIdPTR 323 ", available: %6" PRIuS
327 " KB" 324 " KB"
328 ", committed: %6zu KB\n", 325 ", committed: %6" PRIuS " KB\n",
329 new_space_->Size() / KB, new_space_->Available() / KB, 326 new_space_->Size() / KB, new_space_->Available() / KB,
330 new_space_->CommittedMemory() / KB); 327 new_space_->CommittedMemory() / KB);
331 PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR 328 PrintIsolate(isolate_, "Old space, used: %6" PRIuS
332 " KB" 329 " KB"
333 ", available: %6" V8PRIdPTR 330 ", available: %6" PRIuS
334 " KB" 331 " KB"
335 ", committed: %6zu KB\n", 332 ", committed: %6" PRIuS " KB\n",
336 old_space_->SizeOfObjects() / KB, old_space_->Available() / KB, 333 old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
337 old_space_->CommittedMemory() / KB); 334 old_space_->CommittedMemory() / KB);
338 PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR 335 PrintIsolate(isolate_, "Code space, used: %6" PRIuS
339 " KB" 336 " KB"
340 ", available: %6" V8PRIdPTR 337 ", available: %6" PRIuS
341 " KB" 338 " KB"
342 ", committed: %6zu KB\n", 339 ", committed: %6" PRIuS "KB\n",
343 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB, 340 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
344 code_space_->CommittedMemory() / KB); 341 code_space_->CommittedMemory() / KB);
345 PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR 342 PrintIsolate(isolate_, "Map space, used: %6" PRIuS
346 " KB" 343 " KB"
347 ", available: %6" V8PRIdPTR 344 ", available: %6" PRIuS
348 " KB" 345 " KB"
349 ", committed: %6zu KB\n", 346 ", committed: %6" PRIuS " KB\n",
350 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB, 347 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
351 map_space_->CommittedMemory() / KB); 348 map_space_->CommittedMemory() / KB);
352 PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR 349 PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
353 " KB" 350 " KB"
354 ", available: %6" V8PRIdPTR 351 ", available: %6" PRIuS
355 " KB" 352 " KB"
356 ", committed: %6zu KB\n", 353 ", committed: %6" PRIuS " KB\n",
357 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB, 354 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
358 lo_space_->CommittedMemory() / KB); 355 lo_space_->CommittedMemory() / KB);
359 PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR 356 PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
360 " KB" 357 " KB"
361 ", available: %6" V8PRIdPTR 358 ", available: %6" PRIuS
362 " KB" 359 " KB"
363 ", committed: %6zu KB\n", 360 ", committed: %6" PRIuS "KB\n",
364 this->SizeOfObjects() / KB, this->Available() / KB, 361 this->SizeOfObjects() / KB, this->Available() / KB,
365 this->CommittedMemory() / KB); 362 this->CommittedMemory() / KB);
366 PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n", 363 PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
367 static_cast<intptr_t>(external_memory_ / KB)); 364 external_memory_ / KB);
368 PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n", 365 PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
369 total_gc_time_ms_); 366 total_gc_time_ms_);
370 } 367 }
371 368
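
The format strings above migrate from V8PRIdPTR (signed, pointer-sized) to PRIuS (unsigned, size_t-sized); the external-memory line keeps the standard PRId64 because external_memory_ stays signed. A dedicated macro is needed because MSVC's printf historically rejected C99's %zu. A sketch of the assumed definition (Chromium's base/format_macros.h carries the equivalent, and V8 mirrors it):

    #include <cstddef>
    #include <cstdio>

    // Assumed definition, for illustration only: "%zu" is the C99/POSIX
    // length modifier for size_t, "Iu" is the MSVC spelling.
    #if defined(_MSC_VER)
    #define PRIuS "Iu"
    #else
    #define PRIuS "zu"
    #endif

    void PrintUsage(size_t used_bytes, size_t available_bytes) {
      const size_t KB = 1024;
      std::printf("used: %6" PRIuS " KB, available: %6" PRIuS " KB\n",
                  used_bytes / KB, available_bytes / KB);
    }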
372 // TODO(1238405): Combine the infrastructure for --heap-stats and 369 // TODO(1238405): Combine the infrastructure for --heap-stats and
373 // --log-gc to avoid the complicated preprocessor and flag testing. 370 // --log-gc to avoid the complicated preprocessor and flag testing.
374 void Heap::ReportStatisticsAfterGC() { 371 void Heap::ReportStatisticsAfterGC() {
375 // Similar to the before GC, we use some complicated logic to ensure that 372 // Similar to the before GC, we use some complicated logic to ensure that
376 // NewSpace statistics are logged exactly once when --log-gc is turned on. 373 // NewSpace statistics are logged exactly once when --log-gc is turned on.
377 #if defined(DEBUG) 374 #if defined(DEBUG)
(...skipping 57 matching lines...)
435 if (new_space_->IsAtMaximumCapacity()) { 432 if (new_space_->IsAtMaximumCapacity()) {
436 maximum_size_scavenges_++; 433 maximum_size_scavenges_++;
437 } else { 434 } else {
438 maximum_size_scavenges_ = 0; 435 maximum_size_scavenges_ = 0;
439 } 436 }
440 CheckNewSpaceExpansionCriteria(); 437 CheckNewSpaceExpansionCriteria();
441 UpdateNewSpaceAllocationCounter(); 438 UpdateNewSpaceAllocationCounter();
442 store_buffer()->MoveAllEntriesToRememberedSet(); 439 store_buffer()->MoveAllEntriesToRememberedSet();
443 } 440 }
444 441
445 442 size_t Heap::SizeOfObjects() {
446 intptr_t Heap::SizeOfObjects() { 443 size_t total = 0;
447 intptr_t total = 0;
448 AllSpaces spaces(this); 444 AllSpaces spaces(this);
449 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { 445 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
450 total += space->SizeOfObjects(); 446 total += space->SizeOfObjects();
451 } 447 }
452 return total; 448 return total;
453 } 449 }
454 450
455 451
456 const char* Heap::GetSpaceName(int idx) { 452 const char* Heap::GetSpaceName(int idx) {
457 switch (idx) { 453 switch (idx) {
(...skipping 512 matching lines...)
970 if (FLAG_trace_incremental_marking) { 966 if (FLAG_trace_incremental_marking) {
971 isolate()->PrintWithTimestamp( 967 isolate()->PrintWithTimestamp(
972 "[IncrementalMarking] Delaying MarkSweep.\n"); 968 "[IncrementalMarking] Delaying MarkSweep.\n");
973 } 969 }
974 collector = YoungGenerationCollector(); 970 collector = YoungGenerationCollector();
975 collector_reason = "incremental marking delaying mark-sweep"; 971 collector_reason = "incremental marking delaying mark-sweep";
976 } 972 }
977 } 973 }
978 974
979 bool next_gc_likely_to_collect_more = false; 975 bool next_gc_likely_to_collect_more = false;
980 intptr_t committed_memory_before = 0; 976 size_t committed_memory_before = 0;
981 977
982 if (collector == MARK_COMPACTOR) { 978 if (collector == MARK_COMPACTOR) {
983 committed_memory_before = CommittedOldGenerationMemory(); 979 committed_memory_before = CommittedOldGenerationMemory();
984 } 980 }
985 981
986 { 982 {
987 tracer()->Start(collector, gc_reason, collector_reason); 983 tracer()->Start(collector, gc_reason, collector_reason);
988 DCHECK(AllowHeapAllocation::IsAllowed()); 984 DCHECK(AllowHeapAllocation::IsAllowed());
989 DisallowHeapAllocation no_allocation_during_gc; 985 DisallowHeapAllocation no_allocation_during_gc;
990 GarbageCollectionPrologue(); 986 GarbageCollectionPrologue();
991 987
992 { 988 {
993 HistogramTimer* gc_type_timer = GCTypeTimer(collector); 989 HistogramTimer* gc_type_timer = GCTypeTimer(collector);
994 HistogramTimerScope histogram_timer_scope(gc_type_timer); 990 HistogramTimerScope histogram_timer_scope(gc_type_timer);
995 TRACE_EVENT0("v8", gc_type_timer->name()); 991 TRACE_EVENT0("v8", gc_type_timer->name());
996 992
997 next_gc_likely_to_collect_more = 993 next_gc_likely_to_collect_more =
998 PerformGarbageCollection(collector, gc_callback_flags); 994 PerformGarbageCollection(collector, gc_callback_flags);
999 } 995 }
1000 996
1001 GarbageCollectionEpilogue(); 997 GarbageCollectionEpilogue();
1002 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { 998 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1003 isolate()->CheckDetachedContextsAfterGC(); 999 isolate()->CheckDetachedContextsAfterGC();
1004 } 1000 }
1005 1001
1006 if (collector == MARK_COMPACTOR) { 1002 if (collector == MARK_COMPACTOR) {
1007 intptr_t committed_memory_after = CommittedOldGenerationMemory(); 1003 size_t committed_memory_after = CommittedOldGenerationMemory();
1008 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); 1004 size_t used_memory_after = PromotedSpaceSizeOfObjects();
1009 MemoryReducer::Event event; 1005 MemoryReducer::Event event;
1010 event.type = MemoryReducer::kMarkCompact; 1006 event.type = MemoryReducer::kMarkCompact;
1011 event.time_ms = MonotonicallyIncreasingTimeInMs(); 1007 event.time_ms = MonotonicallyIncreasingTimeInMs();
1012 // Trigger one more GC if 1008 // Trigger one more GC if
1013 // - this GC decreased committed memory, 1009 // - this GC decreased committed memory,
1014 // - there is high fragmentation, 1010 // - there is high fragmentation,
1015 // - there are live detached contexts. 1011 // - there are live detached contexts.
1016 event.next_gc_likely_to_collect_more = 1012 event.next_gc_likely_to_collect_more =
1017 (committed_memory_before - committed_memory_after) > MB || 1013 (committed_memory_before > committed_memory_after + MB) ||
1018 HasHighFragmentation(used_memory_after, committed_memory_after) || 1014 HasHighFragmentation(used_memory_after, committed_memory_after) ||
1019 (detached_contexts()->length() > 0); 1015 (detached_contexts()->length() > 0);
1020 if (deserialization_complete_) { 1016 if (deserialization_complete_) {
1021 memory_reducer_->NotifyMarkCompact(event); 1017 memory_reducer_->NotifyMarkCompact(event);
1022 } 1018 }
1023 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone); 1019 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
1024 } 1020 }
1025 1021
1026 tracer()->Stop(collector); 1022 tracer()->Stop(collector);
1027 } 1023 }
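
The event.next_gc_likely_to_collect_more rewrite above is the recurring unsigned-arithmetic pattern in this patch: with size_t operands, committed_memory_before - committed_memory_after wraps to a huge value whenever this GC grew committed memory, so the subtrahend moves across the comparison instead. A minimal sketch:

    #include <cstddef>

    // "Did this GC shrink committed memory by more than 1 MB?"
    // Signed form:   (before - after) > MB   -- wrong with unsigned counters,
    // because before - after wraps when after > before.
    // Unsigned form: move the subtraction to the other side.
    bool CommittedMemoryDecreased(size_t before, size_t after) {
      const size_t MB = 1024 * 1024;
      return before > after + MB;
    }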
(...skipping 320 matching lines...)
1348 gc_post_processing_depth_--; 1344 gc_post_processing_depth_--;
1349 1345
1350 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); 1346 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1351 1347
1352 // Update relocatables. 1348 // Update relocatables.
1353 Relocatable::PostGarbageCollectionProcessing(isolate_); 1349 Relocatable::PostGarbageCollectionProcessing(isolate_);
1354 1350
1355 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); 1351 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1356 double mutator_speed = 1352 double mutator_speed =
1357 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond(); 1353 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1358 intptr_t old_gen_size = PromotedSpaceSizeOfObjects(); 1354 size_t old_gen_size = PromotedSpaceSizeOfObjects();
1359 if (collector == MARK_COMPACTOR) { 1355 if (collector == MARK_COMPACTOR) {
1360 // Register the amount of external allocated memory. 1356 // Register the amount of external allocated memory.
1361 external_memory_at_last_mark_compact_ = external_memory_; 1357 external_memory_at_last_mark_compact_ = external_memory_;
1362 external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit; 1358 external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1363 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); 1359 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1364 } else if (HasLowYoungGenerationAllocationRate() && 1360 } else if (HasLowYoungGenerationAllocationRate() &&
1365 old_generation_size_configured_) { 1361 old_generation_size_configured_) {
1366 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); 1362 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1367 } 1363 }
1368 1364
(...skipping 229 matching lines...)
1598 PauseAllocationObserversScope pause_observers(this); 1594 PauseAllocationObserversScope pause_observers(this);
1599 1595
1600 mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); 1596 mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
1601 1597
1602 gc_state_ = SCAVENGE; 1598 gc_state_ = SCAVENGE;
1603 1599
1604 // Implements Cheney's copying algorithm 1600 // Implements Cheney's copying algorithm
1605 LOG(isolate_, ResourceEvent("scavenge", "begin")); 1601 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1606 1602
1607 // Used for updating survived_since_last_expansion_ at function end. 1603 // Used for updating survived_since_last_expansion_ at function end.
1608 intptr_t survived_watermark = PromotedSpaceSizeOfObjects(); 1604 size_t survived_watermark = PromotedSpaceSizeOfObjects();
1609 1605
1610 scavenge_collector_->SelectScavengingVisitorsTable(); 1606 scavenge_collector_->SelectScavengingVisitorsTable();
1611 1607
1612 if (UsingEmbedderHeapTracer()) { 1608 if (UsingEmbedderHeapTracer()) {
1613 // Register found wrappers with embedder so it can add them to its marking 1609 // Register found wrappers with embedder so it can add them to its marking
1614 // deque and correctly manage the case when v8 scavenger collects the 1610 // deque and correctly manage the case when v8 scavenger collects the
1615 // wrappers by either keeping wrappables alive, or cleaning marking deque. 1611 // wrappers by either keeping wrappables alive, or cleaning marking deque.
1616 RegisterWrappersWithEmbedderHeapTracer(); 1612 RegisterWrappersWithEmbedderHeapTracer();
1617 } 1613 }
1618 1614
(...skipping 91 matching lines...)
1710 ProcessYoungWeakReferences(&weak_object_retainer); 1706 ProcessYoungWeakReferences(&weak_object_retainer);
1711 1707
1712 DCHECK(new_space_front == new_space_->top()); 1708 DCHECK(new_space_front == new_space_->top());
1713 1709
1714 // Set age mark. 1710 // Set age mark.
1715 new_space_->set_age_mark(new_space_->top()); 1711 new_space_->set_age_mark(new_space_->top());
1716 1712
1717 ArrayBufferTracker::FreeDeadInNewSpace(this); 1713 ArrayBufferTracker::FreeDeadInNewSpace(this);
1718 1714
1719 // Update how much has survived scavenge. 1715 // Update how much has survived scavenge.
1720 IncrementYoungSurvivorsCounter( 1716 DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
1721 static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) + 1717 IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
1722 new_space_->Size())); 1718 new_space_->Size() - survived_watermark);
1723 1719
1724 LOG(isolate_, ResourceEvent("scavenge", "end")); 1720 LOG(isolate_, ResourceEvent("scavenge", "end"));
1725 1721
1726 gc_state_ = NOT_IN_GC; 1722 gc_state_ = NOT_IN_GC;
1727 } 1723 }
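
The survivor accounting at the end of Scavenge is reworked for the same reason: PromotedSpaceSizeOfObjects() - survived_watermark is only safe once the new DCHECK establishes the ordering, and the old int cast disappears along with it. A sketch of the invariant (function name hypothetical):

    #include <cassert>
    #include <cstddef>

    // Promoted-space size can only grow during a scavenge, so the watermark
    // taken at the start is a lower bound; the assert documents that,
    // making the unsigned subtraction provably wrap-free.
    size_t SurvivedYoungBytes(size_t promoted_now, size_t new_space_size,
                              size_t promoted_watermark) {
      assert(promoted_now >= promoted_watermark);
      return promoted_now + new_space_size - promoted_watermark;
    }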
1728 1724
1729 1725
1730 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, 1726 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1731 Object** p) { 1727 Object** p) {
1732 MapWord first_word = HeapObject::cast(*p)->map_word(); 1728 MapWord first_word = HeapObject::cast(*p)->map_word();
(...skipping 267 matching lines...)
2000 1996
2001 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) { 1997 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2002 ArrayBufferTracker::Unregister(this, buffer); 1998 ArrayBufferTracker::Unregister(this, buffer);
2003 } 1999 }
2004 2000
2005 2001
2006 void Heap::ConfigureInitialOldGenerationSize() { 2002 void Heap::ConfigureInitialOldGenerationSize() {
2007 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) { 2003 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2008 old_generation_allocation_limit_ = 2004 old_generation_allocation_limit_ =
2009 Max(MinimumAllocationLimitGrowingStep(), 2005 Max(MinimumAllocationLimitGrowingStep(),
2010 static_cast<intptr_t>( 2006 static_cast<size_t>(
2011 static_cast<double>(old_generation_allocation_limit_) * 2007 static_cast<double>(old_generation_allocation_limit_) *
2012 (tracer()->AverageSurvivalRatio() / 100))); 2008 (tracer()->AverageSurvivalRatio() / 100)));
2013 } 2009 }
2014 } 2010 }
2015 2011
2016 2012
2017 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, 2013 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2018 int instance_size) { 2014 int instance_size) {
2019 Object* result = nullptr; 2015 Object* result = nullptr;
2020 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE); 2016 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
(...skipping 881 matching lines...)
2902 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) { 2898 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2903 return !RootCanBeWrittenAfterInitialization(root_index) && 2899 return !RootCanBeWrittenAfterInitialization(root_index) &&
2904 !InNewSpace(root(root_index)); 2900 !InNewSpace(root(root_index));
2905 } 2901 }
2906 2902
2907 2903
2908 int Heap::FullSizeNumberStringCacheLength() { 2904 int Heap::FullSizeNumberStringCacheLength() {
2909 // Compute the size of the number string cache based on the max newspace size. 2905 // Compute the size of the number string cache based on the max newspace size.
2910 // The number string cache has a minimum size based on twice the initial cache 2906 // The number string cache has a minimum size based on twice the initial cache
2911 // size to ensure that it is bigger after being made 'full size'. 2907 // size to ensure that it is bigger after being made 'full size'.
2912 int number_string_cache_size = max_semi_space_size_ / 512; 2908 size_t number_string_cache_size = max_semi_space_size_ / 512;
2913 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2, 2909 number_string_cache_size =
2914 Min(0x4000, number_string_cache_size)); 2910 Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
2911 Min<size_t>(0x4000u, number_string_cache_size));
2915 // There is a string and a number per entry so the length is twice the number 2912 // There is a string and a number per entry so the length is twice the number
2916 // of entries. 2913 // of entries.
2917 return number_string_cache_size * 2; 2914 return static_cast<int>(number_string_cache_size * 2);
2918 } 2915 }
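
V8's Max and Min are single-type templates, so mixing an int constant with a size_t operand no longer deduces once the counter changes type; one side is cast up and the literal is pinned with an explicit template argument. Roughly, with std::max/std::min standing in and the cache constant's value assumed for illustration:

    #include <algorithm>
    #include <cstddef>

    int FullSizeCacheLength(size_t max_semi_space_size) {
      const int kInitialNumberStringCacheSize = 128;  // assumed value
      size_t cache_size = max_semi_space_size / 512;
      cache_size =
          std::max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
                   std::min<size_t>(0x4000u, cache_size));
      // A string and a number per entry, so the length is twice the entry
      // count; bounded by 0x8000, which fits comfortably back into int.
      return static_cast<int>(cache_size * 2);
    }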
2919 2916
2920 2917
2921 void Heap::FlushNumberStringCache() { 2918 void Heap::FlushNumberStringCache() {
2922 // Flush the number to string cache. 2919 // Flush the number to string cache.
2923 int len = number_string_cache()->length(); 2920 int len = number_string_cache()->length();
2924 for (int i = 0; i < len; i++) { 2921 for (int i = 0; i < len; i++) {
2925 number_string_cache()->set_undefined(i); 2922 number_string_cache()->set_undefined(i);
2926 } 2923 }
2927 } 2924 }
(...skipping 1169 matching lines...)
4097 } 4094 }
4098 4095
4099 4096
4100 bool Heap::HasLowAllocationRate() { 4097 bool Heap::HasLowAllocationRate() {
4101 return HasLowYoungGenerationAllocationRate() && 4098 return HasLowYoungGenerationAllocationRate() &&
4102 HasLowOldGenerationAllocationRate(); 4099 HasLowOldGenerationAllocationRate();
4103 } 4100 }
4104 4101
4105 4102
4106 bool Heap::HasHighFragmentation() { 4103 bool Heap::HasHighFragmentation() {
4107 intptr_t used = PromotedSpaceSizeOfObjects(); 4104 size_t used = PromotedSpaceSizeOfObjects();
4108 intptr_t committed = CommittedOldGenerationMemory(); 4105 size_t committed = CommittedOldGenerationMemory();
4109 return HasHighFragmentation(used, committed); 4106 return HasHighFragmentation(used, committed);
4110 } 4107 }
4111 4108
4112 4109 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
4113 bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) { 4110 const size_t kSlack = 16 * MB;
4114 const intptr_t kSlack = 16 * MB;
4115 // Fragmentation is high if committed > 2 * used + kSlack. 4111 // Fragmentation is high if committed > 2 * used + kSlack.
 4116 // Rewrite the expression to avoid overflow. 4112 // Rewrite the expression to avoid overflow.
4113 DCHECK_GE(committed, used);
4117 return committed - used > used + kSlack; 4114 return committed - used > used + kSlack;
4118 } 4115 }
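
The fragmentation predicate shows the other standard trick: the natural form committed > 2 * used + kSlack can overflow when doubling a large used, so used is subtracted from both sides instead, and the new DCHECK records the committed >= used precondition that makes the left-hand subtraction safe. A minimal sketch:

    #include <cassert>
    #include <cstddef>

    // Equivalent to committed > 2 * used + kSlack, but no term can wrap:
    // committed - used is non-negative by the precondition, and neither
    // side doubles a potentially large value.
    bool HasHighFragmentation(size_t used, size_t committed) {
      const size_t kSlack = 16u * 1024 * 1024;
      assert(committed >= used);
      return committed - used > used + kSlack;
    }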
4119 4116
4120 bool Heap::ShouldOptimizeForMemoryUsage() { 4117 bool Heap::ShouldOptimizeForMemoryUsage() {
4121 return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() || 4118 return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
4122 HighMemoryPressure() || IsLowMemoryDevice(); 4119 HighMemoryPressure() || IsLowMemoryDevice();
4123 } 4120 }
4124 4121
4125 void Heap::ActivateMemoryReducerIfNeeded() { 4122 void Heap::ActivateMemoryReducerIfNeeded() {
4126 // Activate memory reducer when switching to background if 4123 // Activate memory reducer when switching to background if
(...skipping 836 matching lines...)
4963 // We don't do a v->Synchronize call here, because in debug mode that will 4960 // We don't do a v->Synchronize call here, because in debug mode that will
4964 // output a flag to the snapshot. However at this point the serializer and 4961 // output a flag to the snapshot. However at this point the serializer and
4965 // deserializer are deliberately a little unsynchronized (see above) so the 4962 // deserializer are deliberately a little unsynchronized (see above) so the
4966 // checking of the sync flag in the snapshot would fail. 4963 // checking of the sync flag in the snapshot would fail.
4967 } 4964 }
4968 4965
4969 4966
4970 // TODO(1236194): Since the heap size is configurable on the command line 4967 // TODO(1236194): Since the heap size is configurable on the command line
4971 // and through the API, we should gracefully handle the case that the heap 4968 // and through the API, we should gracefully handle the case that the heap
4972 // size is not big enough to fit all the initial objects. 4969 // size is not big enough to fit all the initial objects.
4973 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size, 4970 bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
4974 int max_executable_size, size_t code_range_size) { 4971 size_t max_executable_size, size_t code_range_size) {
4975 if (HasBeenSetUp()) return false; 4972 if (HasBeenSetUp()) return false;
4976 4973
4977 // Overwrite default configuration. 4974 // Overwrite default configuration.
4978 if (max_semi_space_size > 0) { 4975 if (max_semi_space_size != 0) {
4979 max_semi_space_size_ = max_semi_space_size * MB; 4976 max_semi_space_size_ = max_semi_space_size * MB;
4980 } 4977 }
4981 if (max_old_space_size > 0) { 4978 if (max_old_space_size != 0) {
4982 max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB; 4979 max_old_generation_size_ = max_old_space_size * MB;
4983 } 4980 }
4984 if (max_executable_size > 0) { 4981 if (max_executable_size != 0) {
4985 max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB; 4982 max_executable_size_ = max_executable_size * MB;
4986 } 4983 }
4987 4984
4988 // If max space size flags are specified overwrite the configuration. 4985 // If max space size flags are specified overwrite the configuration.
4989 if (FLAG_max_semi_space_size > 0) { 4986 if (FLAG_max_semi_space_size > 0) {
4990 max_semi_space_size_ = FLAG_max_semi_space_size * MB; 4987 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
4991 } 4988 }
4992 if (FLAG_max_old_space_size > 0) { 4989 if (FLAG_max_old_space_size > 0) {
4993 max_old_generation_size_ = 4990 max_old_generation_size_ =
4994 static_cast<intptr_t>(FLAG_max_old_space_size) * MB; 4991 static_cast<size_t>(FLAG_max_old_space_size) * MB;
4995 } 4992 }
4996 if (FLAG_max_executable_size > 0) { 4993 if (FLAG_max_executable_size > 0) {
4997 max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB; 4994 max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
4998 } 4995 }
4999 4996
5000 if (Page::kPageSize > MB) { 4997 if (Page::kPageSize > MB) {
5001 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize); 4998 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
5002 max_old_generation_size_ = 4999 max_old_generation_size_ =
5003 ROUND_UP(max_old_generation_size_, Page::kPageSize); 5000 ROUND_UP(max_old_generation_size_, Page::kPageSize);
5004 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize); 5001 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
5005 } 5002 }
5006 5003
5007 if (FLAG_stress_compaction) { 5004 if (FLAG_stress_compaction) {
5008 // This will cause more frequent GCs when stressing. 5005 // This will cause more frequent GCs when stressing.
5009 max_semi_space_size_ = MB; 5006 max_semi_space_size_ = MB;
5010 } 5007 }
5011 5008
5012 // The new space size must be a power of two to support single-bit testing 5009 // The new space size must be a power of two to support single-bit testing
5013 // for containment. 5010 // for containment.
5014 max_semi_space_size_ = 5011 max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
5015 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_); 5012 static_cast<uint32_t>(max_semi_space_size_));
5016 5013
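
base::bits::RoundUpToPowerOfTwo32 takes a uint32_t, hence the new explicit narrowing cast now that max_semi_space_size_ is size_t (a semispace comfortably fits in 32 bits). For reference, a sketch of the classic bit-smearing formulation of such a helper, not V8's actual implementation:

    #include <cstdint>

    // Smallest power of two >= value, for value <= 2^31; value 0 yields 0.
    // Smearing the highest set bit rightward turns the word into 2^k - 1,
    // and adding one lands on the power of two.
    uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
      value -= 1;
      value |= value >> 1;
      value |= value >> 2;
      value |= value >> 4;
      value |= value >> 8;
      value |= value >> 16;
      return value + 1;
    }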
5017 if (FLAG_min_semi_space_size > 0) { 5014 if (FLAG_min_semi_space_size > 0) {
5018 int initial_semispace_size = FLAG_min_semi_space_size * MB; 5015 size_t initial_semispace_size =
5016 static_cast<size_t>(FLAG_min_semi_space_size) * MB;
5019 if (initial_semispace_size > max_semi_space_size_) { 5017 if (initial_semispace_size > max_semi_space_size_) {
5020 initial_semispace_size_ = max_semi_space_size_; 5018 initial_semispace_size_ = max_semi_space_size_;
5021 if (FLAG_trace_gc) { 5019 if (FLAG_trace_gc) {
5022 PrintIsolate(isolate_, 5020 PrintIsolate(isolate_,
5023 "Min semi-space size cannot be more than the maximum " 5021 "Min semi-space size cannot be more than the maximum "
5024 "semi-space size of %d MB\n", 5022 "semi-space size of %" PRIuS " MB\n",
5025 max_semi_space_size_ / MB); 5023 max_semi_space_size_ / MB);
5026 } 5024 }
5027 } else { 5025 } else {
5028 initial_semispace_size_ = 5026 initial_semispace_size_ =
5029 ROUND_UP(initial_semispace_size, Page::kPageSize); 5027 ROUND_UP(initial_semispace_size, Page::kPageSize);
5030 } 5028 }
5031 } 5029 }
5032 5030
5033 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_); 5031 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5034 5032
5035 if (FLAG_semi_space_growth_factor < 2) { 5033 if (FLAG_semi_space_growth_factor < 2) {
5036 FLAG_semi_space_growth_factor = 2; 5034 FLAG_semi_space_growth_factor = 2;
5037 } 5035 }
5038 5036
5039 // The old generation is paged and needs at least one page for each space. 5037 // The old generation is paged and needs at least one page for each space.
5040 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; 5038 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5041 max_old_generation_size_ = 5039 max_old_generation_size_ =
5042 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), 5040 Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
5043 max_old_generation_size_); 5041 max_old_generation_size_);
5044 5042
5045 // The max executable size must be less than or equal to the max old 5043 // The max executable size must be less than or equal to the max old
5046 // generation size. 5044 // generation size.
5047 if (max_executable_size_ > max_old_generation_size_) { 5045 if (max_executable_size_ > max_old_generation_size_) {
5048 max_executable_size_ = max_old_generation_size_; 5046 max_executable_size_ = max_old_generation_size_;
5049 } 5047 }
5050 5048
5051 if (FLAG_initial_old_space_size > 0) { 5049 if (FLAG_initial_old_space_size > 0) {
5052 initial_old_generation_size_ = FLAG_initial_old_space_size * MB; 5050 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
(...skipping 78 matching lines...)
5131 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1); 5129 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
5132 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise); 5130 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
5133 if (gc_state() == Heap::NOT_IN_GC) { 5131 if (gc_state() == Heap::NOT_IN_GC) {
5134 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose); 5132 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
5135 } else { 5133 } else {
5136 accumulator.Add("Cannot get stack trace in GC."); 5134 accumulator.Add("Cannot get stack trace in GC.");
5137 } 5135 }
5138 } 5136 }
5139 } 5137 }
5140 5138
5141 5139 size_t Heap::PromotedSpaceSizeOfObjects() {
5142 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5143 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() + 5140 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5144 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects(); 5141 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5145 } 5142 }
5146 5143
5147 5144 uint64_t Heap::PromotedExternalMemorySize() {
5148 int64_t Heap::PromotedExternalMemorySize() {
5149 if (external_memory_ <= external_memory_at_last_mark_compact_) return 0; 5145 if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5150 return external_memory_ - external_memory_at_last_mark_compact_; 5146 return static_cast<uint64_t>(external_memory_ -
5147 external_memory_at_last_mark_compact_);
5151 } 5148 }
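
external_memory_ itself stays signed, since embedders may adjust it downward, so the getter's early return is what lets the difference be cast to an unsigned type. A sketch:

    #include <cstdint>

    // The early return proves external_memory > at_last_mark_compact before
    // the subtraction, so the difference is non-negative and the cast to
    // uint64_t is value-preserving.
    uint64_t PromotedExternalMemorySize(int64_t external_memory,
                                        int64_t at_last_mark_compact) {
      if (external_memory <= at_last_mark_compact) return 0;
      return static_cast<uint64_t>(external_memory - at_last_mark_compact);
    }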
5152 5149
5153 5150
5154 const double Heap::kMinHeapGrowingFactor = 1.1; 5151 const double Heap::kMinHeapGrowingFactor = 1.1;
5155 const double Heap::kMaxHeapGrowingFactor = 4.0; 5152 const double Heap::kMaxHeapGrowingFactor = 4.0;
5156 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0; 5153 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
5157 const double Heap::kMaxHeapGrowingFactorIdle = 1.5; 5154 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
5158 const double Heap::kConservativeHeapGrowingFactor = 1.3; 5155 const double Heap::kConservativeHeapGrowingFactor = 1.3;
5159 const double Heap::kTargetMutatorUtilization = 0.97; 5156 const double Heap::kTargetMutatorUtilization = 0.97;
5160 5157
(...skipping 47 matching lines...)
5208 const double b = speed_ratio * (1 - mu) - mu; 5205 const double b = speed_ratio * (1 - mu) - mu;
5209 5206
5210 // The factor is a / b, but we need to check for small b first. 5207 // The factor is a / b, but we need to check for small b first.
5211 double factor = 5208 double factor =
5212 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor; 5209 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
5213 factor = Min(factor, kMaxHeapGrowingFactor); 5210 factor = Min(factor, kMaxHeapGrowingFactor);
5214 factor = Max(factor, kMinHeapGrowingFactor); 5211 factor = Max(factor, kMinHeapGrowingFactor);
5215 return factor; 5212 return factor;
5216 } 5213 }
5217 5214
5218 5215 size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5219 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor, 5216 size_t old_gen_size) {
5220 intptr_t old_gen_size) {
5221 CHECK(factor > 1.0); 5217 CHECK(factor > 1.0);
5222 CHECK(old_gen_size > 0); 5218 CHECK(old_gen_size > 0);
5223 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); 5219 uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
5224 limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep()); 5220 limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
5221 MinimumAllocationLimitGrowingStep());
5225 limit += new_space_->Capacity(); 5222 limit += new_space_->Capacity();
5226 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; 5223 uint64_t halfway_to_the_max =
5227 return Min(limit, halfway_to_the_max); 5224 (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
5225 return static_cast<size_t>(Min(limit, halfway_to_the_max));
5228 } 5226 }
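
Here the intermediate math widens to uint64_t: on 32-bit targets old_gen_size * factor or the halfway average could exceed size_t, so everything is computed in 64 bits and only the final, bounded result is narrowed. Approximately (parameter names hypothetical):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // All intermediates live in uint64_t; the result is capped at the
    // halfway-to-max value, which fits in size_t, so the final cast is safe.
    size_t CalculateLimit(double factor, size_t old_gen_size,
                          size_t min_growing_step, size_t new_space_capacity,
                          size_t max_old_generation_size) {
      uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
      limit = std::max(
          limit, static_cast<uint64_t>(old_gen_size) + min_growing_step);
      limit += new_space_capacity;
      const uint64_t halfway =
          (static_cast<uint64_t>(old_gen_size) + max_old_generation_size) / 2;
      return static_cast<size_t>(std::min(limit, halfway));
    }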
5229 5227
5230 intptr_t Heap::MinimumAllocationLimitGrowingStep() { 5228 size_t Heap::MinimumAllocationLimitGrowingStep() {
5231 const double kRegularAllocationLimitGrowingStep = 8; 5229 const size_t kRegularAllocationLimitGrowingStep = 8;
5232 const double kLowMemoryAllocationLimitGrowingStep = 2; 5230 const size_t kLowMemoryAllocationLimitGrowingStep = 2;
5233 intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB); 5231 size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
5234 return limit * (ShouldOptimizeForMemoryUsage() 5232 return limit * (ShouldOptimizeForMemoryUsage()
5235 ? kLowMemoryAllocationLimitGrowingStep 5233 ? kLowMemoryAllocationLimitGrowingStep
5236 : kRegularAllocationLimitGrowingStep); 5234 : kRegularAllocationLimitGrowingStep);
5237 } 5235 }
5238 5236
5239 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size, 5237 void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
5240 double gc_speed,
5241 double mutator_speed) { 5238 double mutator_speed) {
5242 double factor = HeapGrowingFactor(gc_speed, mutator_speed); 5239 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5243 5240
5244 if (FLAG_trace_gc_verbose) { 5241 if (FLAG_trace_gc_verbose) {
5245 isolate_->PrintWithTimestamp( 5242 isolate_->PrintWithTimestamp(
5246 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f " 5243 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
5247 "(gc=%.f, mutator=%.f)\n", 5244 "(gc=%.f, mutator=%.f)\n",
5248 factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed, 5245 factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
5249 mutator_speed); 5246 mutator_speed);
5250 } 5247 }
(...skipping 12 matching lines...)
5263 } 5260 }
5264 5261
5265 if (FLAG_heap_growing_percent > 0) { 5262 if (FLAG_heap_growing_percent > 0) {
5266 factor = 1.0 + FLAG_heap_growing_percent / 100.0; 5263 factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5267 } 5264 }
5268 5265
5269 old_generation_allocation_limit_ = 5266 old_generation_allocation_limit_ =
5270 CalculateOldGenerationAllocationLimit(factor, old_gen_size); 5267 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5271 5268
5272 if (FLAG_trace_gc_verbose) { 5269 if (FLAG_trace_gc_verbose) {
5273 isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR 5270 isolate_->PrintWithTimestamp(
5274 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n", 5271 "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
5275 old_gen_size / KB, 5272 old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
5276 old_generation_allocation_limit_ / KB, factor);
5277 } 5273 }
5278 } 5274 }
5279 5275
5280 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size, 5276 void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
5281 double gc_speed, 5277 double gc_speed,
5282 double mutator_speed) { 5278 double mutator_speed) {
5283 double factor = HeapGrowingFactor(gc_speed, mutator_speed); 5279 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5284 intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size); 5280 size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5285 if (limit < old_generation_allocation_limit_) { 5281 if (limit < old_generation_allocation_limit_) {
5286 if (FLAG_trace_gc_verbose) { 5282 if (FLAG_trace_gc_verbose) {
5287 isolate_->PrintWithTimestamp( 5283 isolate_->PrintWithTimestamp(
5288 "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR 5284 "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
5289 " KB, " 5285 " KB, "
5290 "new limit: %" V8PRIdPTR " KB (%.1f)\n", 5286 "new limit: %" PRIuS " KB (%.1f)\n",
5291 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB, 5287 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
5292 factor); 5288 factor);
5293 } 5289 }
5294 old_generation_allocation_limit_ = limit; 5290 old_generation_allocation_limit_ = limit;
5295 } 5291 }
5296 } 5292 }
5297 5293
 5298 // This predicate is called when an old generation space cannot allocate from 5294 // This predicate is called when an old generation space cannot allocate from
5299 // the free list and is about to add a new page. Returning false will cause a 5295 // the free list and is about to add a new page. Returning false will cause a
5300 // major GC. It happens when the old generation allocation limit is reached and 5296 // major GC. It happens when the old generation allocation limit is reached and
(...skipping 13 matching lines...)
5314 return true; 5310 return true;
5315 } 5311 }
5316 5312
5317 // This function returns either kNoLimit, kSoftLimit, or kHardLimit. 5313 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
5318 // The kNoLimit means that either incremental marking is disabled or it is too 5314 // The kNoLimit means that either incremental marking is disabled or it is too
5319 // early to start incremental marking. 5315 // early to start incremental marking.
5320 // The kSoftLimit means that incremental marking should be started soon. 5316 // The kSoftLimit means that incremental marking should be started soon.
5321 // The kHardLimit means that incremental marking should be started immediately. 5317 // The kHardLimit means that incremental marking should be started immediately.
5322 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() { 5318 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5323 if (!incremental_marking()->CanBeActivated() || 5319 if (!incremental_marking()->CanBeActivated() ||
5324 PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) { 5320 PromotedSpaceSizeOfObjects() <=
5321 IncrementalMarking::kActivationThreshold) {
5325 // Incremental marking is disabled or it is too early to start. 5322 // Incremental marking is disabled or it is too early to start.
5326 return IncrementalMarkingLimit::kNoLimit; 5323 return IncrementalMarkingLimit::kNoLimit;
5327 } 5324 }
5328 if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) || 5325 if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
5329 HighMemoryPressure()) { 5326 HighMemoryPressure()) {
5330 // If there is high memory pressure or stress testing is enabled, then 5327 // If there is high memory pressure or stress testing is enabled, then
5331 // start marking immediately. 5328 // start marking immediately.
5332 return IncrementalMarkingLimit::kHardLimit; 5329 return IncrementalMarkingLimit::kHardLimit;
5333 } 5330 }
5334 intptr_t old_generation_space_available = OldGenerationSpaceAvailable(); 5331 size_t old_generation_space_available = OldGenerationSpaceAvailable();
5335 if (old_generation_space_available > new_space_->Capacity()) { 5332 if (old_generation_space_available > new_space_->Capacity()) {
5336 return IncrementalMarkingLimit::kNoLimit; 5333 return IncrementalMarkingLimit::kNoLimit;
5337 } 5334 }
5338 // We are close to the allocation limit. 5335 // We are close to the allocation limit.
5339 // Choose between the hard and the soft limits. 5336 // Choose between the hard and the soft limits.
5340 if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) { 5337 if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
5341 return IncrementalMarkingLimit::kHardLimit; 5338 return IncrementalMarkingLimit::kHardLimit;
5342 } 5339 }
5343 return IncrementalMarkingLimit::kSoftLimit; 5340 return IncrementalMarkingLimit::kSoftLimit;
5344 } 5341 }
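
Two comparisons in this function change meaning under unsigned arithmetic: the activation check becomes <=, which keeps a zero-valued threshold meaningful (with size_t, a strict < against zero could never be true), and available <= 0 collapses to available == 0, since a size_t is never negative and compilers flag the tautological form. In miniature (names hypothetical):

    #include <cstddef>

    // With size_t, "x < 0" is always false and "x <= 0" means "x == 0";
    // the patch spells the intent directly.
    bool AtHardLimit(size_t old_gen_available, bool optimize_for_memory) {
      return old_gen_available == 0 || optimize_for_memory;
    }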
5345 5342
5346 void Heap::EnableInlineAllocation() { 5343 void Heap::EnableInlineAllocation() {
5347 if (!inline_allocation_disabled_) return; 5344 if (!inline_allocation_disabled_) return;
5348 inline_allocation_disabled_ = false; 5345 inline_allocation_disabled_ = false;
5349 5346
5350 // Update inline allocation limit for new space. 5347 // Update inline allocation limit for new space.
(...skipping 1114 matching lines...)
6465 } 6462 }
6466 6463
6467 6464
6468 // static 6465 // static
6469 int Heap::GetStaticVisitorIdForMap(Map* map) { 6466 int Heap::GetStaticVisitorIdForMap(Map* map) {
6470 return StaticVisitorBase::GetVisitorId(map); 6467 return StaticVisitorBase::GetVisitorId(map);
6471 } 6468 }
6472 6469
6473 } // namespace internal 6470 } // namespace internal
6474 } // namespace v8 6471 } // namespace v8