OLD | NEW |
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include "vm/dart.h" | 7 #include "vm/dart.h" |
8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
10 #include "vm/lockers.h" | 10 #include "vm/lockers.h" |
(...skipping 376 matching lines...)
387 if (to_ == NULL) { | 387 if (to_ == NULL) { |
388 // TODO(koda): We could try to recover (collect old space, wait for another | 388 // TODO(koda): We could try to recover (collect old space, wait for another |
389 // isolate to finish scavenge, etc.). | 389 // isolate to finish scavenge, etc.). |
390 OUT_OF_MEMORY(); | 390 OUT_OF_MEMORY(); |
391 } | 391 } |
392 UpdateMaxHeapCapacity(); | 392 UpdateMaxHeapCapacity(); |
393 top_ = FirstObjectStart(); | 393 top_ = FirstObjectStart(); |
394 resolved_top_ = top_; | 394 resolved_top_ = top_; |
395 end_ = to_->end(); | 395 end_ = to_->end(); |
396 | 396 |
| 397 // Throw out the old information about the from space |
| 398 if (isolate->IsMutatorThreadScheduled()) { |
| 399 Thread* mutator_thread = isolate->mutator_thread(); |
| 400 mutator_thread->set_top(top_); |
| 401 mutator_thread->set_end(end_); |
| 402 } |
| 403 |
397 return from; | 404 return from; |
398 } | 405 } |
399 | 406 |
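To make the intent of the Prologue lines added above concrete, here is a minimal standalone sketch. The types and names are hypothetical simplifications, not the VM's real Thread/Scavenger classes: after the semispaces are flipped, the scheduled mutator's bump-allocation window is repointed at the fresh to-space so its stale from-space bounds are never used again.

#include <assert.h>
#include <stdint.h>

// Stand-ins for dart::Thread and dart::Scavenger (hypothetical simplifications).
struct MutatorThread {
  uintptr_t top = 0;   // next free address for bump allocation
  uintptr_t end = 0;   // allocation limit
};

struct NewSpace {
  uintptr_t top_ = 0;
  uintptr_t end_ = 0;

  // Mirrors the added Prologue lines: point the space, and the scheduled
  // mutator if there is one, at the fresh to-space bounds.
  void ResetAfterFlip(uintptr_t first_object_start, uintptr_t to_space_end,
                      MutatorThread* mutator /* null if not scheduled */) {
    top_ = first_object_start;
    end_ = to_space_end;
    if (mutator != nullptr) {
      mutator->top = top_;   // throw out the old from-space window
      mutator->end = end_;
    }
  }
};

int main() {
  MutatorThread mutator;
  mutator.top = 0x1000;      // stale bounds pointing into the old from-space
  mutator.end = 0x2000;
  NewSpace space;
  space.ResetAfterFlip(0x8000, 0x9000, &mutator);
  assert(mutator.top == 0x8000 && mutator.end == 0x9000);
  return 0;
}

The Epilogue change below pushes the same top_/end_ pair to the thread once the scavenge has finished copying, so both hand-offs go in the same direction: scavenger to mutator.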
400 void Scavenger::Epilogue(Isolate* isolate, | 407 void Scavenger::Epilogue(Isolate* isolate, |
401 SemiSpace* from, | 408 SemiSpace* from, |
402 bool invoke_api_callbacks) { | 409 bool invoke_api_callbacks) { |
403 // All objects in the to space have been copied from the from space at this | 410 // All objects in the to space have been copied from the from space at this |
404 // moment. | 411 // moment. |
405 | 412 |
406 // Ensure the mutator thread will fail the next allocation. This will force | 413 // Ensure the mutator thread now has the up-to-date top_ and end_ of the |
407 // mutator to allocate a new TLAB | 414 // semispace |
408 Thread* mutator_thread = isolate->mutator_thread(); | 415 if (isolate->IsMutatorThreadScheduled()) { |
409 ASSERT((mutator_thread == NULL) || (!mutator_thread->HasActiveTLAB())); | 416 Thread* thread = isolate->mutator_thread(); |
| 417 thread->set_top(top_); |
| 418 thread->set_end(end_); |
| 419 } |
410 | 420 |
411 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); | 421 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
412 if (stats_history_.Size() >= 2) { | 422 if (stats_history_.Size() >= 2) { |
413 // Previous scavenge is only given half as much weight. | 423 // Previous scavenge is only given half as much weight. |
414 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); | 424 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
415 avg_frac /= 1.0 + 0.5; // Normalize. | 425 avg_frac /= 1.0 + 0.5; // Normalize. |
416 } | 426 } |
417 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 427 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
418 // Remember the limit to which objects have been copied. | 428 // Remember the limit to which objects have been copied. |
419 survivor_end_ = top_; | 429 survivor_end_ = top_; |
(...skipping 285 matching lines...)
705 #endif // defined(DEBUG) | 715 #endif // defined(DEBUG) |
706 | 716 |
707 WeakProperty::Clear(cur_weak); | 717 WeakProperty::Clear(cur_weak); |
708 | 718 |
709 // Advance to next weak property in the queue. | 719 // Advance to next weak property in the queue. |
710 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 720 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
711 } | 721 } |
712 } | 722 } |
713 } | 723 } |
714 | 724 |
715 void Scavenger::MakeNewSpaceIterable() const { | 725 void Scavenger::FlushTLS() const { |
716 ASSERT(heap_ != NULL); | 726 ASSERT(heap_ != NULL); |
717 Thread* mutator_thread = heap_->isolate()->mutator_thread(); | 727 if (heap_->isolate()->IsMutatorThreadScheduled()) { |
718 if (mutator_thread != NULL && !scavenging_) { | 728 Thread* mutator_thread = heap_->isolate()->mutator_thread(); |
719 if (mutator_thread->HasActiveTLAB()) { | 729 mutator_thread->heap()->new_space()->set_top(mutator_thread->top()); |
720 ASSERT(mutator_thread->top() <= | |
721 mutator_thread->heap()->new_space()->top()); | |
722 heap_->FillRemainingTLAB(mutator_thread); | |
723 } | |
724 } | 730 } |
725 } | 731 } |
726 | 732 |
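FlushTLS (renamed from MakeNewSpaceIterable) synchronizes in the other direction: before new space is walked, the mutator's current top is copied back into the space so objects it has bump-allocated since the last sync fall inside the iteration range. A rough standalone sketch, again with hypothetical simplified types rather than the VM's real classes:

#include <assert.h>
#include <stdint.h>

struct MutatorThread {
  uintptr_t top = 0;   // advanced by bump allocation on the mutator
  uintptr_t end = 0;
};

struct NewSpace {
  uintptr_t top_ = 0;  // iteration limit used by VisitObjects and friends

  // Mirrors FlushTLS: pull the mutator's current top back into the space so
  // freshly allocated objects are covered by [FirstObjectStart(), top_).
  void FlushTLS(const MutatorThread* mutator /* null if not scheduled */) {
    if (mutator != nullptr) {
      top_ = mutator->top;
    }
  }
};

int main() {
  MutatorThread mutator;
  mutator.top = 0x8040;  // the mutator has bump-allocated 0x40 bytes
  mutator.end = 0x9000;
  NewSpace space;
  space.top_ = 0x8000;   // last value the scavenger saw
  space.FlushTLS(&mutator);
  assert(space.top_ == 0x8040);  // the walk now includes the new objects
  return 0;
}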
727 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { | 733 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
728 ASSERT(Thread::Current()->IsAtSafepoint() || | 734 ASSERT(Thread::Current()->IsAtSafepoint() || |
729 (Thread::Current()->task_kind() == Thread::kMarkerTask)); | 735 (Thread::Current()->task_kind() == Thread::kMarkerTask)); |
730 MakeNewSpaceIterable(); | 736 FlushTLS(); |
731 uword cur = FirstObjectStart(); | 737 uword cur = FirstObjectStart(); |
732 while (cur < top_) { | 738 while (cur < top_) { |
733 RawObject* raw_obj = RawObject::FromAddr(cur); | 739 RawObject* raw_obj = RawObject::FromAddr(cur); |
734 cur += raw_obj->VisitPointers(visitor); | 740 cur += raw_obj->VisitPointers(visitor); |
735 } | 741 } |
736 } | 742 } |
737 | 743 |
738 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { | 744 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { |
739 ASSERT(Thread::Current()->IsAtSafepoint() || | 745 ASSERT(Thread::Current()->IsAtSafepoint() || |
740 (Thread::Current()->task_kind() == Thread::kMarkerTask)); | 746 (Thread::Current()->task_kind() == Thread::kMarkerTask)); |
741 MakeNewSpaceIterable(); | 747 FlushTLS(); |
742 uword cur = FirstObjectStart(); | 748 uword cur = FirstObjectStart(); |
743 while (cur < top_) { | 749 while (cur < top_) { |
744 RawObject* raw_obj = RawObject::FromAddr(cur); | 750 RawObject* raw_obj = RawObject::FromAddr(cur); |
745 visitor->VisitObject(raw_obj); | 751 visitor->VisitObject(raw_obj); |
746 cur += raw_obj->Size(); | 752 cur += raw_obj->Size(); |
747 } | 753 } |
748 } | 754 } |
749 | 755 |
750 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { | 756 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { |
751 set->AddRegion(to_->start(), to_->end()); | 757 set->AddRegion(to_->start(), to_->end()); |
752 } | 758 } |
753 | 759 |
754 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { | 760 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { |
755 ASSERT(!scavenging_); | 761 ASSERT(!scavenging_); |
756 MakeNewSpaceIterable(); | 762 FlushTLS(); |
757 uword cur = FirstObjectStart(); | 763 uword cur = FirstObjectStart(); |
758 if (visitor->VisitRange(cur, top_)) { | 764 if (visitor->VisitRange(cur, top_)) { |
759 while (cur < top_) { | 765 while (cur < top_) { |
760 RawObject* raw_obj = RawObject::FromAddr(cur); | 766 RawObject* raw_obj = RawObject::FromAddr(cur); |
761 uword next = cur + raw_obj->Size(); | 767 uword next = cur + raw_obj->Size(); |
762 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { | 768 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { |
763 return raw_obj; // Found object, return it. | 769 return raw_obj; // Found object, return it. |
764 } | 770 } |
765 cur = next; | 771 cur = next; |
766 } | 772 } |
(...skipping 26 matching lines...)
793 scavenging_ = true; | 799 scavenging_ = true; |
794 | 800 |
795 failed_to_promote_ = false; | 801 failed_to_promote_ = false; |
796 | 802 |
797 PageSpace* page_space = heap_->old_space(); | 803 PageSpace* page_space = heap_->old_space(); |
798 NoSafepointScope no_safepoints; | 804 NoSafepointScope no_safepoints; |
799 | 805 |
800 int64_t post_safe_point = OS::GetCurrentMonotonicMicros(); | 806 int64_t post_safe_point = OS::GetCurrentMonotonicMicros(); |
801 heap_->RecordTime(kSafePoint, post_safe_point - pre_safe_point); | 807 heap_->RecordTime(kSafePoint, post_safe_point - pre_safe_point); |
802 | 808 |
803 Thread* mutator_thread = isolate->mutator_thread(); | |
804 if ((mutator_thread != NULL) && (mutator_thread->HasActiveTLAB())) { | |
805 heap_->AbandonRemainingTLAB(mutator_thread); | |
806 } | |
807 | |
808 // TODO(koda): Make verification more compatible with concurrent sweep. | 809 // TODO(koda): Make verification more compatible with concurrent sweep. |
809 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { | 810 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { |
810 OS::PrintErr("Verifying before Scavenge..."); | 811 OS::PrintErr("Verifying before Scavenge..."); |
811 heap_->Verify(kForbidMarked); | 812 heap_->Verify(kForbidMarked); |
812 OS::PrintErr(" done.\n"); | 813 OS::PrintErr(" done.\n"); |
813 } | 814 } |
814 | 815 |
815 // Prepare for a scavenge. | 816 // Prepare for a scavenge. |
816 SpaceUsage usage_before = GetCurrentUsage(); | 817 SpaceUsage usage_before = GetCurrentUsage(); |
817 intptr_t promo_candidate_words = | 818 intptr_t promo_candidate_words = |
(...skipping 91 matching lines...)
909 // the scavenge. | 910 // the scavenge. |
910 // The former can introduce an object that we might fail to collect. | 911 // The former can introduce an object that we might fail to collect. |
911 // The latter means even if the scavenge promotes every object in the new | 912 // The latter means even if the scavenge promotes every object in the new |
912 // space, the new allocation means the space is not empty, | 913 // space, the new allocation means the space is not empty, |
913 // causing the assertion below to fail. | 914 // causing the assertion below to fail. |
914 SafepointOperationScope scope(Thread::Current()); | 915 SafepointOperationScope scope(Thread::Current()); |
915 | 916 |
916 // Forces the next scavenge to promote all the objects in the new space. | 917 // Forces the next scavenge to promote all the objects in the new space. |
917 survivor_end_ = top_; | 918 survivor_end_ = top_; |
918 | 919 |
| 920 if (heap_->isolate()->IsMutatorThreadScheduled()) { |
| 921 Thread* mutator_thread = heap_->isolate()->mutator_thread(); |
| 922 survivor_end_ = mutator_thread->top(); |
| 923 } |
| 924 |
919 Scavenge(); | 925 Scavenge(); |
920 | 926 |
921 // It is possible for objects to stay in the new space | 927 // It is possible for objects to stay in the new space |
922 // if the VM cannot create more pages for these objects. | 928 // if the VM cannot create more pages for these objects. |
923 ASSERT((UsedInWords() == 0) || failed_to_promote_); | 929 ASSERT((UsedInWords() == 0) || failed_to_promote_); |
924 } | 930 } |
925 | 931 |
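The lines added to Evacuate raise survivor_end_ to the scheduled mutator's current top. A standalone sketch (hypothetical simplified types and addresses) of why that forces the next scavenge to treat everything allocated so far, including in-flight bump allocations, as a promotion candidate:

#include <assert.h>
#include <stdint.h>

// Hypothetical simplification of the scavenger's promotion test.
struct NewSpace {
  uintptr_t first_object_start = 0;
  uintptr_t survivor_end = 0;  // objects below this survived the last scavenge

  bool IsPromotionCandidate(uintptr_t obj_addr) const {
    return obj_addr < survivor_end;
  }
};

int main() {
  NewSpace space;
  space.first_object_start = 0x8000;
  space.survivor_end = 0x8100;      // limit recorded by the previous scavenge
  uintptr_t mutator_top = 0x8400;   // the mutator has kept allocating

  assert(!space.IsPromotionCandidate(0x8200));  // would merely survive today

  // What the added Evacuate lines do: cover everything allocated so far,
  // including objects bump-allocated by the scheduled mutator.
  space.survivor_end = mutator_top;
  assert(space.IsPromotionCandidate(0x8200));   // next scavenge promotes it
  return 0;
}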
926 int64_t Scavenger::UsedInWords() const { | |
927 int64_t free_space_in_tlab = 0; | |
928 if (heap_->isolate()->IsMutatorThreadScheduled()) { | |
929 Thread* mutator_thread = heap_->isolate()->mutator_thread(); | |
930 if (mutator_thread->HasActiveTLAB()) { | |
931 free_space_in_tlab = | |
932 (mutator_thread->end() - mutator_thread->top()) >> kWordSizeLog2; | |
933 } | |
934 } | |
935 int64_t max_space_used = (top_ - FirstObjectStart()) >> kWordSizeLog2; | |
936 return max_space_used - free_space_in_tlab; | |
937 } | |
938 | |
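For reference, the UsedInWords() override deleted on the left computed the space's high-water mark and then subtracted the unused tail of the mutator's active TLAB. A standalone sketch of that arithmetic with hypothetical addresses (the replacement is not shown in this hunk):

#include <assert.h>
#include <stdint.h>

int main() {
  const int kWordSizeLog2 = 3;           // 8-byte words on a 64-bit target
  uintptr_t first_object_start = 0x8000;
  uintptr_t space_top = 0x9000;          // high-water mark: end of the TLAB handed out
  uintptr_t mutator_top = 0x8400;        // how far the mutator actually filled it
  uintptr_t mutator_end = 0x9000;

  int64_t max_space_used = (space_top - first_object_start) >> kWordSizeLog2;
  int64_t free_in_tlab = (mutator_end - mutator_top) >> kWordSizeLog2;
  int64_t used_in_words = max_space_used - free_in_tlab;

  assert(used_in_words == (int64_t)(0x400 >> kWordSizeLog2));  // 0x400 bytes in use
  return 0;
}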
939 } // namespace dart | 932 } // namespace dart |