OLD | NEW |
---|---|
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include "vm/dart.h" | 7 #include "vm/dart.h" |
8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
10 #include "vm/lockers.h" | 10 #include "vm/lockers.h" |
(...skipping 305 matching lines...) | |
316 Scavenger::Scavenger(Heap* heap, | 316 Scavenger::Scavenger(Heap* heap, |
317 intptr_t max_semi_capacity_in_words, | 317 intptr_t max_semi_capacity_in_words, |
318 uword object_alignment) | 318 uword object_alignment) |
319 : heap_(heap), | 319 : heap_(heap), |
320 max_semi_capacity_in_words_(max_semi_capacity_in_words), | 320 max_semi_capacity_in_words_(max_semi_capacity_in_words), |
321 object_alignment_(object_alignment), | 321 object_alignment_(object_alignment), |
322 scavenging_(false), | 322 scavenging_(false), |
323 delayed_weak_properties_(NULL), | 323 delayed_weak_properties_(NULL), |
324 gc_time_micros_(0), | 324 gc_time_micros_(0), |
325 collections_(0), | 325 collections_(0), |
326 scavenge_words_per_micro_(400), | |
327 idle_scavenge_threshold_in_words_(0), | |
326 external_size_(0), | 328 external_size_(0), |
327 failed_to_promote_(false), | 329 failed_to_promote_(false), |
328 space_lock_(new Mutex()) { | 330 space_lock_(new Mutex()) { |
329 // Verify assumptions about the first word in objects which the scavenger is | 331 // Verify assumptions about the first word in objects which the scavenger is |
330 // going to use for forwarding pointers. | 332 // going to use for forwarding pointers. |
331 ASSERT(Object::tags_offset() == 0); | 333 ASSERT(Object::tags_offset() == 0); |
332 | 334 |
333 // Set initial size resulting in a total of three different levels. | 335 // Set initial size resulting in a total of three different levels. |
334 const intptr_t initial_semi_capacity_in_words = | 336 const intptr_t initial_semi_capacity_in_words = |
335 max_semi_capacity_in_words / | 337 max_semi_capacity_in_words / |
336 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); | 338 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); |
337 | 339 |
338 const intptr_t kVmNameSize = 128; | 340 const intptr_t kVmNameSize = 128; |
339 char vm_name[kVmNameSize]; | 341 char vm_name[kVmNameSize]; |
340 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); | 342 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); |
341 to_ = SemiSpace::New(initial_semi_capacity_in_words, vm_name); | 343 to_ = SemiSpace::New(initial_semi_capacity_in_words, vm_name); |
342 if (to_ == NULL) { | 344 if (to_ == NULL) { |
343 OUT_OF_MEMORY(); | 345 OUT_OF_MEMORY(); |
344 } | 346 } |
345 // Setup local fields. | 347 // Setup local fields. |
346 top_ = FirstObjectStart(); | 348 top_ = FirstObjectStart(); |
347 resolved_top_ = top_; | 349 resolved_top_ = top_; |
348 end_ = to_->end(); | 350 end_ = to_->end(); |
349 | 351 |
350 survivor_end_ = FirstObjectStart(); | 352 survivor_end_ = FirstObjectStart(); |
353 idle_scavenge_threshold_in_words_ = initial_semi_capacity_in_words; | |
351 | 354 |
352 UpdateMaxHeapCapacity(); | 355 UpdateMaxHeapCapacity(); |
353 UpdateMaxHeapUsage(); | 356 UpdateMaxHeapUsage(); |
354 } | 357 } |
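As a worked example of the sizing comment above (the growth factor value here is illustrative, not taken from this CL): with a growth factor of 4 and a maximum semi-space capacity of 16 MB, the initial capacity is 16 MB / (4 * 4) = 1 MB, giving the three levels 1 MB, 4 MB, and 16 MB. The new idle_scavenge_threshold_in_words_ starts at that same initial capacity and is then re-estimated after every scavenge in Epilogue below.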
355 | 358 |
356 Scavenger::~Scavenger() { | 359 Scavenger::~Scavenger() { |
357 ASSERT(!scavenging_); | 360 ASSERT(!scavenging_); |
358 to_->Delete(); | 361 to_->Delete(); |
359 delete space_lock_; | 362 delete space_lock_; |
360 } | 363 } |
(...skipping 51 matching lines...) | |
412 avg_frac /= 1.0 + 0.5; // Normalize. | 415 avg_frac /= 1.0 + 0.5; // Normalize. |
413 } | 416 } |
414 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 417 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
415 // Remember the limit to which objects have been copied. | 418 // Remember the limit to which objects have been copied. |
416 survivor_end_ = top_; | 419 survivor_end_ = top_; |
417 } else { | 420 } else { |
418 // Move survivor end to the end of the to_ space, making all surviving | 421 // Move survivor end to the end of the to_ space, making all surviving |
419 // objects candidates for promotion next time. | 422 // objects candidates for promotion next time. |
420 survivor_end_ = end_; | 423 survivor_end_ = end_; |
421 } | 424 } |
425 | |
426 // Update estimate of scavenger speed. This statistic assumes survivorship | |
427 // rates don't change much. | |
428 intptr_t history_used = 1; | |
429 intptr_t history_micros = 1; | |
430 ASSERT(stats_history_.Size() > 0); | |
431 for (intptr_t i = 0; i < stats_history_.Size(); i++) { | |
432 history_used += stats_history_.Get(i).UsedBeforeInWords(); | |
433 history_micros += stats_history_.Get(i).DurationMicros(); | |
434 } | |
435 scavenge_words_per_micro_ = history_used / history_micros; | |
436 | |
437 // Update amount of new-space we must allocate before performing an idle | |
438 // scavenge. This is based on the amount of work we expect to be able to | |
439 // complete in a typical idle period. | |
siva (2017/08/31 01:17:50): Maybe update this comment is a little bit more det
rmacnak (2017/09/01 00:37:00): Done.
440 intptr_t average_idle_task_micros = 4000; | |
441 idle_scavenge_threshold_in_words_ = | |
442 scavenge_words_per_micro_ * average_idle_task_micros; | |
443 intptr_t lower_bound = 512 * KB / kWordSize; | |
444 if (idle_scavenge_threshold_in_words_ < lower_bound) { | |
445 idle_scavenge_threshold_in_words_ = lower_bound; | |
446 } | |
447 intptr_t upper_bound = 8 * CapacityInWords() / 10; | |
448 if (idle_scavenge_threshold_in_words_ > upper_bound) { | |
449 idle_scavenge_threshold_in_words_ = upper_bound; | |
450 } | |
451 | |
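The new block above estimates scavenger throughput from the recorded scavenge history and derives the idle-scavenge threshold from it. Below is a minimal standalone sketch of that calculation, not VM code: ScavengeSample and ComputeIdleThresholdInWords are hypothetical stand-ins for the stats_history_ ring buffer and the inline code in Epilogue, while the 4000-microsecond idle-task estimate and the clamping bounds mirror the values in the CL.

#include <stdint.h>
#include <vector>

// Hypothetical stand-in for one entry of the stats_history_ ring buffer.
struct ScavengeSample {
  intptr_t used_before_in_words;  // New-space used before the scavenge.
  intptr_t duration_micros;       // Wall time the scavenge took.
};

// Mirrors the arithmetic added to Scavenger::Epilogue above: estimate
// throughput, scale it by the expected idle-task length, then clamp.
intptr_t ComputeIdleThresholdInWords(const std::vector<ScavengeSample>& history,
                                     intptr_t capacity_in_words,
                                     intptr_t word_size) {
  // Start both sums at 1 so the division below can never be by zero.
  intptr_t history_used = 1;
  intptr_t history_micros = 1;
  for (const ScavengeSample& sample : history) {
    history_used += sample.used_before_in_words;
    history_micros += sample.duration_micros;
  }
  intptr_t scavenge_words_per_micro = history_used / history_micros;

  // Assume a typical idle task offers roughly 4000 microseconds of work.
  const intptr_t kAverageIdleTaskMicros = 4000;
  intptr_t threshold = scavenge_words_per_micro * kAverageIdleTaskMicros;

  // Clamp to [512 KB worth of words, 80% of new-space capacity].
  const intptr_t lower_bound = (512 * 1024) / word_size;
  const intptr_t upper_bound = 8 * capacity_in_words / 10;
  if (threshold < lower_bound) threshold = lower_bound;
  if (threshold > upper_bound) threshold = upper_bound;
  return threshold;
}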
422 #if defined(DEBUG) | 452 #if defined(DEBUG) |
423 // We can only safely verify the store buffers from old space if there is no | 453 // We can only safely verify the store buffers from old space if there is no |
424 // concurrent old space task. At the same time we prevent new tasks from | 454 // concurrent old space task. At the same time we prevent new tasks from |
425 // being spawned. | 455 // being spawned. |
426 { | 456 { |
427 PageSpace* page_space = heap_->old_space(); | 457 PageSpace* page_space = heap_->old_space(); |
428 MonitorLocker ml(page_space->tasks_lock()); | 458 MonitorLocker ml(page_space->tasks_lock()); |
429 if (page_space->tasks() == 0) { | 459 if (page_space->tasks() == 0) { |
430 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); | 460 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); |
431 heap_->old_space()->VisitObjectPointers(&verify_store_buffer_visitor); | 461 heap_->old_space()->VisitObjectPointers(&verify_store_buffer_visitor); |
432 } | 462 } |
433 } | 463 } |
434 #endif // defined(DEBUG) | 464 #endif // defined(DEBUG) |
435 from->Delete(); | 465 from->Delete(); |
436 UpdateMaxHeapUsage(); | 466 UpdateMaxHeapUsage(); |
437 if (heap_ != NULL) { | 467 if (heap_ != NULL) { |
438 heap_->UpdateGlobalMaxUsed(); | 468 heap_->UpdateGlobalMaxUsed(); |
439 } | 469 } |
440 } | 470 } |
441 | 471 |
472 bool Scavenger::ShouldPerformIdleScavenge(int64_t deadline) { | |
473 intptr_t used_in_words = UsedInWords(); | |
474 if (used_in_words < idle_scavenge_threshold_in_words_) { | |
475 return false; | |
476 } | |
siva (2017/08/31 01:17:50): Should have a history of idle notification deadlin
rmacnak (2017/09/01 00:37:00): Acknowledged.
477 int64_t estimated_scavenge_completion = | |
478 OS::GetCurrentMonotonicMicros() + | |
479 used_in_words / scavenge_words_per_micro_; | |
480 return estimated_scavenge_completion <= deadline; | |
481 } | |
482 | |
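For context, one possible caller of the new predicate is sketched below. MaybeScavengeOnIdle and its idle_period_micros parameter are hypothetical and not part of this CL; Scavenger::Scavenge(), Scavenger::ShouldPerformIdleScavenge(), and OS::GetCurrentMonotonicMicros() are the existing entry points shown in this file.

// Hypothetical caller, not part of this CL: only start a scavenge during an
// idle period that is expected to be long enough to hide it.
void MaybeScavengeOnIdle(Scavenger* scavenger, int64_t idle_period_micros) {
  int64_t deadline = OS::GetCurrentMonotonicMicros() + idle_period_micros;
  if (scavenger->ShouldPerformIdleScavenge(deadline)) {
    scavenger->Scavenge();
  }
}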
442 void Scavenger::IterateStoreBuffers(Isolate* isolate, | 483 void Scavenger::IterateStoreBuffers(Isolate* isolate, |
443 ScavengerVisitor* visitor) { | 484 ScavengerVisitor* visitor) { |
444 // Iterating through the store buffers. | 485 // Iterating through the store buffers. |
445 // Grab the deduplication sets out of the isolate's consolidated store buffer. | 486 // Grab the deduplication sets out of the isolate's consolidated store buffer. |
446 StoreBufferBlock* pending = isolate->store_buffer()->Blocks(); | 487 StoreBufferBlock* pending = isolate->store_buffer()->Blocks(); |
447 intptr_t total_count = 0; | 488 intptr_t total_count = 0; |
448 while (pending != NULL) { | 489 while (pending != NULL) { |
449 StoreBufferBlock* next = pending->next(); | 490 StoreBufferBlock* next = pending->next(); |
450 // Generated code appends to store buffers; tell MemorySanitizer. | 491 // Generated code appends to store buffers; tell MemorySanitizer. |
451 MSAN_UNPOISON(pending, sizeof(*pending)); | 492 MSAN_UNPOISON(pending, sizeof(*pending)); |
(...skipping 335 matching lines...) | |
787 } | 828 } |
788 | 829 |
789 void Scavenger::Scavenge() { | 830 void Scavenger::Scavenge() { |
790 Isolate* isolate = heap_->isolate(); | 831 Isolate* isolate = heap_->isolate(); |
791 // Ensure that all threads for this isolate are at a safepoint (either stopped | 832 // Ensure that all threads for this isolate are at a safepoint (either stopped |
792 // or in native code). If two threads are racing at this point, the loser | 833 // or in native code). If two threads are racing at this point, the loser |
793 // will continue with its scavenge after waiting for the winner to complete. | 834 // will continue with its scavenge after waiting for the winner to complete. |
794 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry | 835 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry |
795 // logic to avoid needless collections. | 836 // logic to avoid needless collections. |
796 | 837 |
797 int64_t pre_safe_point = OS::GetCurrentMonotonicMicros(); | 838 int64_t start = OS::GetCurrentMonotonicMicros(); |
798 | 839 |
799 Thread* thread = Thread::Current(); | 840 Thread* thread = Thread::Current(); |
800 SafepointOperationScope safepoint_scope(thread); | 841 SafepointOperationScope safepoint_scope(thread); |
801 | 842 |
802 // Scavenging is not reentrant. Make sure that is the case. | 843 // Scavenging is not reentrant. Make sure that is the case. |
803 ASSERT(!scavenging_); | 844 ASSERT(!scavenging_); |
804 scavenging_ = true; | 845 scavenging_ = true; |
805 | 846 |
806 failed_to_promote_ = false; | 847 failed_to_promote_ = false; |
807 | 848 |
808 PageSpace* page_space = heap_->old_space(); | 849 PageSpace* page_space = heap_->old_space(); |
809 NoSafepointScope no_safepoints; | 850 NoSafepointScope no_safepoints; |
810 | 851 |
811 int64_t post_safe_point = OS::GetCurrentMonotonicMicros(); | 852 int64_t safe_point = OS::GetCurrentMonotonicMicros(); |
812 heap_->RecordTime(kSafePoint, post_safe_point - pre_safe_point); | 853 heap_->RecordTime(kSafePoint, safe_point - start); |
813 | 854 |
814 AbandonAllTLABs(isolate); | 855 AbandonAllTLABs(isolate); |
815 | 856 |
816 // TODO(koda): Make verification more compatible with concurrent sweep. | 857 // TODO(koda): Make verification more compatible with concurrent sweep. |
817 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { | 858 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { |
818 OS::PrintErr("Verifying before Scavenge..."); | 859 OS::PrintErr("Verifying before Scavenge..."); |
819 heap_->Verify(kForbidMarked); | 860 heap_->Verify(kForbidMarked); |
820 OS::PrintErr(" done.\n"); | 861 OS::PrintErr(" done.\n"); |
821 } | 862 } |
822 | 863 |
823 // Prepare for a scavenge. | 864 // Prepare for a scavenge. |
824 SpaceUsage usage_before = GetCurrentUsage(); | 865 SpaceUsage usage_before = GetCurrentUsage(); |
825 intptr_t promo_candidate_words = | 866 intptr_t promo_candidate_words = |
826 (survivor_end_ - FirstObjectStart()) / kWordSize; | 867 (survivor_end_ - FirstObjectStart()) / kWordSize; |
827 SemiSpace* from = Prologue(isolate); | 868 SemiSpace* from = Prologue(isolate); |
828 // The API prologue/epilogue may create/destroy zones, so we must not | 869 // The API prologue/epilogue may create/destroy zones, so we must not |
829 // depend on zone allocations surviving beyond the epilogue callback. | 870 // depend on zone allocations surviving beyond the epilogue callback. |
830 { | 871 { |
831 StackZone zone(thread); | 872 StackZone zone(thread); |
832 // Setup the visitor and run the scavenge. | 873 // Setup the visitor and run the scavenge. |
833 ScavengerVisitor visitor(isolate, this, from); | 874 ScavengerVisitor visitor(isolate, this, from); |
834 page_space->AcquireDataLock(); | 875 page_space->AcquireDataLock(); |
835 IterateRoots(isolate, &visitor); | 876 IterateRoots(isolate, &visitor); |
836 int64_t start = OS::GetCurrentMonotonicMicros(); | 877 int64_t iterate_roots = OS::GetCurrentMonotonicMicros(); |
837 ProcessToSpace(&visitor); | 878 ProcessToSpace(&visitor); |
838 int64_t middle = OS::GetCurrentMonotonicMicros(); | 879 int64_t process_to_space = OS::GetCurrentMonotonicMicros(); |
839 { | 880 { |
840 TIMELINE_FUNCTION_GC_DURATION(thread, "WeakHandleProcessing"); | 881 TIMELINE_FUNCTION_GC_DURATION(thread, "WeakHandleProcessing"); |
841 ScavengerWeakVisitor weak_visitor(thread, this); | 882 ScavengerWeakVisitor weak_visitor(thread, this); |
842 IterateWeakRoots(isolate, &weak_visitor); | 883 IterateWeakRoots(isolate, &weak_visitor); |
843 } | 884 } |
844 ProcessWeakReferences(); | 885 ProcessWeakReferences(); |
845 page_space->ReleaseDataLock(); | 886 page_space->ReleaseDataLock(); |
846 | 887 |
847 // Scavenge finished. Run accounting. | 888 // Scavenge finished. Run accounting. |
848 int64_t end = OS::GetCurrentMonotonicMicros(); | 889 int64_t end = OS::GetCurrentMonotonicMicros(); |
849 heap_->RecordTime(kProcessToSpace, middle - start); | 890 heap_->RecordTime(kProcessToSpace, process_to_space - iterate_roots); |
850 heap_->RecordTime(kIterateWeaks, end - middle); | 891 heap_->RecordTime(kIterateWeaks, end - process_to_space); |
851 stats_history_.Add(ScavengeStats( | 892 stats_history_.Add(ScavengeStats( |
852 start, end, usage_before, GetCurrentUsage(), promo_candidate_words, | 893 start, end, usage_before, GetCurrentUsage(), promo_candidate_words, |
853 visitor.bytes_promoted() >> kWordSizeLog2)); | 894 visitor.bytes_promoted() >> kWordSizeLog2)); |
854 } | 895 } |
855 Epilogue(isolate, from); | 896 Epilogue(isolate, from); |
856 | 897 |
857 // TODO(koda): Make verification more compatible with concurrent sweep. | 898 // TODO(koda): Make verification more compatible with concurrent sweep. |
858 if (FLAG_verify_after_gc && !FLAG_concurrent_sweep) { | 899 if (FLAG_verify_after_gc && !FLAG_concurrent_sweep) { |
859 OS::PrintErr("Verifying after Scavenge..."); | 900 OS::PrintErr("Verifying after Scavenge..."); |
860 heap_->Verify(kForbidMarked); | 901 heap_->Verify(kForbidMarked); |
(...skipping 88 matching lines...) | |
949 return free_space >> kWordSizeLog2; | 990 return free_space >> kWordSizeLog2; |
950 } | 991 } |
951 | 992 |
952 int64_t Scavenger::UsedInWords() const { | 993 int64_t Scavenger::UsedInWords() const { |
953 int64_t free_space_in_tlab = FreeSpaceInWords(heap_->isolate()); | 994 int64_t free_space_in_tlab = FreeSpaceInWords(heap_->isolate()); |
954 int64_t max_space_used = (top_ - FirstObjectStart()) >> kWordSizeLog2; | 995 int64_t max_space_used = (top_ - FirstObjectStart()) >> kWordSizeLog2; |
955 return max_space_used - free_space_in_tlab; | 996 return max_space_used - free_space_in_tlab; |
956 } | 997 } |
957 | 998 |
958 } // namespace dart | 999 } // namespace dart |