OLD | NEW |
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include "vm/dart.h" | 7 #include "vm/dart.h" |
8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
10 #include "vm/lockers.h" | 10 #include "vm/lockers.h" |
(...skipping 119 matching lines...)
130 // Get the new location of the object. | 130 // Get the new location of the object. |
131 new_addr = ForwardedAddr(header); | 131 new_addr = ForwardedAddr(header); |
132 } else { | 132 } else { |
133 intptr_t size = raw_obj->Size(); | 133 intptr_t size = raw_obj->Size(); |
134 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); | 134 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); |
135 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); | 135 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); |
136 // Check whether object should be promoted. | 136 // Check whether object should be promoted. |
137 if (scavenger_->survivor_end_ <= raw_addr) { | 137 if (scavenger_->survivor_end_ <= raw_addr) { |
138 // Not a survivor of a previous scavenge. Just copy the object into the | 138 // Not a survivor of a previous scavenge. Just copy the object into the |
139 // to space. | 139 // to space. |
140 new_addr = scavenger_->TryAllocate(size); | 140 new_addr = scavenger_->TryAllocateGC(size); |
141 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 141 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
142 } else { | 142 } else { |
143 // TODO(iposva): Experiment with less aggressive promotion. For example, | 143 // TODO(iposva): Experiment with less aggressive promotion. For example, |
144 // a coin toss could decide whether an object is promoted or kept alive | 144 // a coin toss could decide whether an object is promoted or kept alive |
145 // in this generation. | 145 // in this generation. |
146 // | 146 // |
147 // This object is a survivor of a previous scavenge. Attempt to promote | 147 // This object is a survivor of a previous scavenge. Attempt to promote |
148 // the object. | 148 // the object. |
149 new_addr = | 149 new_addr = |
150 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); | 150 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); |
151 if (new_addr != 0) { | 151 if (new_addr != 0) { |
152 // If promotion succeeded then we need to remember it so that it can | 152 // If promotion succeeded then we need to remember it so that it can |
153 // be traversed later. | 153 // be traversed later. |
154 scavenger_->PushToPromotedStack(new_addr); | 154 scavenger_->PushToPromotedStack(new_addr); |
155 bytes_promoted_ += size; | 155 bytes_promoted_ += size; |
156 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); | 156 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); |
157 } else { | 157 } else { |
158 // Promotion did not succeed. Copy into the to space instead. | 158 // Promotion did not succeed. Copy into the to space instead. |
159 scavenger_->failed_to_promote_ = true; | 159 scavenger_->failed_to_promote_ = true; |
160 new_addr = scavenger_->TryAllocate(size); | 160 new_addr = scavenger_->TryAllocateGC(size); |
161 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 161 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
162 } | 162 } |
163 } | 163 } |
164 // During a scavenge we always succeed in at least copying all of the | 164 // During a scavenge we always succeed in at least copying all of the |
165 // current objects to the to space. | 165 // current objects to the to space. |
166 ASSERT(new_addr != 0); | 166 ASSERT(new_addr != 0); |
167 // Copy the object to the new location. | 167 // Copy the object to the new location. |
168 memmove(reinterpret_cast<void*>(new_addr), | 168 memmove(reinterpret_cast<void*>(new_addr), |
169 reinterpret_cast<void*>(raw_addr), size); | 169 reinterpret_cast<void*>(raw_addr), size); |
170 // Remember forwarding address. | 170 // Remember forwarding address. |
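This hunk is the copy-or-promote step of a Cheney-style scavenge: an object at or above survivor_end_ is seeing its first scavenge and is simply evacuated into to-space, while an object below it already survived a previous scavenge and becomes a promotion candidate. For reference, a minimal sketch of that policy follows; Arena and its TryAllocate are toy stand-ins for TryAllocateGC and TryAllocatePromoLocked, not the VM's API:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Toy bump-pointer arena standing in for both to-space and old space.
    struct Arena {
      uintptr_t top;
      uintptr_t end;
      uintptr_t TryAllocate(intptr_t size) {  // Returns 0 on failure.
        if (top + size > end) return 0;
        uintptr_t result = top;
        top += size;
        return result;
      }
    };

    // Copies one live object, promoting survivors of a previous scavenge.
    uintptr_t CopyOrPromote(uintptr_t raw_addr, intptr_t size,
                            uintptr_t survivor_end, Arena* to,
                            Arena* old_space, bool* failed_to_promote) {
      uintptr_t new_addr;
      if (survivor_end <= raw_addr) {
        // First scavenge for this object: evacuate into to-space.
        new_addr = to->TryAllocate(size);
      } else {
        // Survived a previous scavenge: attempt promotion into old space.
        new_addr = old_space->TryAllocate(size);
        if (new_addr == 0) {
          // Promotion failed; fall back to to-space.
          *failed_to_promote = true;
          new_addr = to->TryAllocate(size);
        }
      }
      assert(new_addr != 0);  // A scavenge must copy every live object.
      memmove(reinterpret_cast<void*>(new_addr),
              reinterpret_cast<void*>(raw_addr), size);
      return new_addr;  // Caller installs this as the forwarding address.
    }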
(...skipping 214 matching lines...)
385 return old_size_in_words; | 385 return old_size_in_words; |
386 } | 386 } |
387 } | 387 } |
388 | 388 |
389 | 389 |
390 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { | 390 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { |
391 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { | 391 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { |
392 (isolate->gc_prologue_callback())(); | 392 (isolate->gc_prologue_callback())(); |
393 } | 393 } |
394 isolate->PrepareForGC(); | 394 isolate->PrepareForGC(); |
| 395 |
395 // Flip the two semi-spaces so that to_ is always the space for allocating | 396 // Flip the two semi-spaces so that to_ is always the space for allocating |
396 // objects. | 397 // objects. |
397 SemiSpace* from = to_; | 398 SemiSpace* from = to_; |
398 | 399 |
399 const intptr_t kVmNameSize = 128; | 400 const intptr_t kVmNameSize = 128; |
400 char vm_name[kVmNameSize]; | 401 char vm_name[kVmNameSize]; |
401 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); | 402 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); |
402 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); | 403 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); |
403 if (to_ == NULL) { | 404 if (to_ == NULL) { |
404 // TODO(koda): We could try to recover (collect old space, wait for another | 405 // TODO(koda): We could try to recover (collect old space, wait for another |
405 // isolate to finish scavenge, etc.). | 406 // isolate to finish scavenge, etc.). |
406 OUT_OF_MEMORY(); | 407 OUT_OF_MEMORY(); |
407 } | 408 } |
408 UpdateMaxHeapCapacity(); | 409 UpdateMaxHeapCapacity(); |
409 top_ = FirstObjectStart(); | 410 top_ = FirstObjectStart(); |
410 resolved_top_ = top_; | 411 resolved_top_ = top_; |
411 end_ = to_->end(); | 412 end_ = to_->end(); |
| 413 |
| 414 // Throw out the old TLS information about the from space. |
| 415 if (isolate->IsMutatorThreadScheduled()) { |
| 416 Thread* mutator_thread = isolate->mutator_thread(); |
| 417 mutator_thread->set_top(top_); |
| 418 mutator_thread->set_end(end_); |
| 419 } |
| 420 |
412 return from; | 421 return from; |
413 } | 422 } |
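The lines added to Prologue are the heart of this change: the mutator thread now caches the new-space allocation top and end in its TLS, so after the semi-spaces are flipped those cached pointers still refer to from-space and must be reset to the fresh to-space bounds. A hedged sketch of the flip, with stub types standing in for SemiSpace and Thread:

    #include <cstdint>

    struct SemiSpaceStub { uintptr_t start, end; };  // Stand-in for SemiSpace.
    struct ThreadStub { uintptr_t top, end; };       // Stand-in for Thread TLS.

    // Flips the semi-spaces and invalidates the mutator's stale bump pointers.
    SemiSpaceStub* Flip(SemiSpaceStub** to, SemiSpaceStub* fresh,
                        ThreadStub* mutator /* null if not scheduled */) {
      SemiSpaceStub* from = *to;  // The current to-space becomes from-space.
      *to = fresh;                // All copying now targets the fresh space.
      if (mutator != nullptr) {
        // Without this, the mutator would keep allocating into from-space.
        mutator->top = fresh->start;
        mutator->end = fresh->end;
      }
      return from;
    }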
414 | 423 |
415 | 424 |
416 void Scavenger::Epilogue(Isolate* isolate, | 425 void Scavenger::Epilogue(Isolate* isolate, |
417 SemiSpace* from, | 426 SemiSpace* from, |
418 bool invoke_api_callbacks) { | 427 bool invoke_api_callbacks) { |
419 // All objects in the to space have been copied from the from space at this | 428 // All objects in the to space have been copied from the from space at this |
420 // moment. | 429 // moment. |
| 430 |
| 431 // Ensure the mutator thread now has the up-to-date top_ and end_ of the |
| 432 // semispace. |
| 433 if (isolate->IsMutatorThreadScheduled()) { |
| 434 Thread* thread = isolate->mutator_thread(); |
| 435 thread->set_top(top_); |
| 436 thread->set_end(end_); |
| 437 } |
| 438 |
421 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); | 439 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
422 if (stats_history_.Size() >= 2) { | 440 if (stats_history_.Size() >= 2) { |
423 // Previous scavenge is only given half as much weight. | 441 // Previous scavenge is only given half as much weight. |
424 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); | 442 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
425 avg_frac /= 1.0 + 0.5; // Normalize. | 443 avg_frac /= 1.0 + 0.5; // Normalize. |
426 } | 444 } |
427 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 445 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
428 // Remember the limit to which objects have been copied. | 446 // Remember the limit to which objects have been copied. |
429 survivor_end_ = top_; | 447 survivor_end_ = top_; |
430 } else { | 448 } else { |
(...skipping 292 matching lines...)
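In the early-tenuring decision above, the current scavenge's promotion-success fraction counts twice as much as the previous one, and the sum is renormalized by 1.0 + 0.5. A worked example (the 66% threshold below is illustrative, not a quoted default):

    // Two-sample weighted average of promotion success; newest counts double.
    double EarlyTenuringAverage(double current, double previous) {
      return (current + 0.5 * previous) / 1.5;  // Normalize weights 1.0 + 0.5.
    }

    // EarlyTenuringAverage(0.9, 0.3) == (0.9 + 0.15) / 1.5 == 0.7.
    // With a threshold of, say, 66%, avg_frac < 0.66 is false, so the
    // else branch (elided in this hunk) takes the early-tenuring path.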
723 | 741 |
724 WeakProperty::Clear(cur_weak); | 742 WeakProperty::Clear(cur_weak); |
725 | 743 |
726 // Advance to next weak property in the queue. | 744 // Advance to next weak property in the queue. |
727 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 745 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
728 } | 746 } |
729 } | 747 } |
730 } | 748 } |
731 | 749 |
732 | 750 |
| 751 void Scavenger::FlushTLS() const { |
| 752 if (heap_ != NULL) { // heap_ is NULL in the ZeroSizeScavenger test. |
| 753 if (heap_->isolate()->IsMutatorThreadScheduled()) { |
| 754 Thread* mutator_thread = heap_->isolate()->mutator_thread(); |
| 755 mutator_thread->heap()->new_space()->set_top(mutator_thread->top()); |
| 756 } |
| 757 } |
| 758 } |
| 759 |
| 760 |
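FlushTLS is the mirror image of the Prologue/Epilogue updates: between safepoints the mutator bumps only its thread-local top, so the scavenger's own top_ goes stale, and each of the heap walks below must copy the TLS value back first. A minimal sketch of that invariant, using stubs rather than the VM's Thread and Scavenger types:

    #include <cstdint>

    struct MutatorStub { uintptr_t tls_top; };  // Thread-local allocation top.

    struct NewSpaceStub {
      uintptr_t top;  // Stale while a mutator thread is running.
      // Republish the authoritative TLS value before iterating the heap.
      void FlushTLS(const MutatorStub* mutator) {
        if (mutator != nullptr) {
          top = mutator->tls_top;
        }
      }
    };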
733 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { | 761 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
| 762 FlushTLS(); |
734 uword cur = FirstObjectStart(); | 763 uword cur = FirstObjectStart(); |
735 while (cur < top_) { | 764 while (cur < top_) { |
736 RawObject* raw_obj = RawObject::FromAddr(cur); | 765 RawObject* raw_obj = RawObject::FromAddr(cur); |
737 cur += raw_obj->VisitPointers(visitor); | 766 cur += raw_obj->VisitPointers(visitor); |
738 } | 767 } |
739 } | 768 } |
740 | 769 |
741 | 770 |
742 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { | 771 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { |
| 772 FlushTLS(); |
743 uword cur = FirstObjectStart(); | 773 uword cur = FirstObjectStart(); |
744 while (cur < top_) { | 774 while (cur < top_) { |
745 RawObject* raw_obj = RawObject::FromAddr(cur); | 775 RawObject* raw_obj = RawObject::FromAddr(cur); |
746 visitor->VisitObject(raw_obj); | 776 visitor->VisitObject(raw_obj); |
747 cur += raw_obj->Size(); | 777 cur += raw_obj->Size(); |
748 } | 778 } |
749 } | 779 } |
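Both visitors rely on the same property of a bump-allocated semi-space: live objects sit contiguously between FirstObjectStart() and top_, and each object reports its own size, so the size doubles as the iteration stride. A generic sketch of the walk (ObjStub approximates a self-describing object header):

    #include <cstdint>

    struct ObjStub { intptr_t size; };  // Self-describing object header.

    // Usage: WalkRegion(first, top, [](ObjStub* obj) { /* visit */ });
    template <typename Visitor>
    void WalkRegion(uintptr_t first, uintptr_t top, Visitor visit) {
      uintptr_t cur = first;
      while (cur < top) {
        ObjStub* obj = reinterpret_cast<ObjStub*>(cur);
        visit(obj);
        cur += obj->size;  // Contiguous objects: the size is the stride.
      }
    }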
750 | 780 |
751 | 781 |
752 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { | 782 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { |
753 set->AddRegion(to_->start(), to_->end()); | 783 set->AddRegion(to_->start(), to_->end()); |
754 } | 784 } |
755 | 785 |
756 | 786 |
757 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { | 787 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { |
758 ASSERT(!scavenging_); | 788 ASSERT(!scavenging_); |
| 789 FlushTLS(); |
759 uword cur = FirstObjectStart(); | 790 uword cur = FirstObjectStart(); |
760 if (visitor->VisitRange(cur, top_)) { | 791 if (visitor->VisitRange(cur, top_)) { |
761 while (cur < top_) { | 792 while (cur < top_) { |
762 RawObject* raw_obj = RawObject::FromAddr(cur); | 793 RawObject* raw_obj = RawObject::FromAddr(cur); |
763 uword next = cur + raw_obj->Size(); | 794 uword next = cur + raw_obj->Size(); |
764 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { | 795 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { |
765 return raw_obj; // Found object, return it. | 796 return raw_obj; // Found object, return it. |
766 } | 797 } |
767 cur = next; | 798 cur = next; |
768 } | 799 } |
(...skipping 143 matching lines...)
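FindObject adds range pruning on top of the same walk: VisitRange first rules out whole address spans, and only objects inside an interesting span are matched individually. A hedged sketch of that pattern (stub predicate names, not the VM's visitor API):

    #include <cstdint>

    struct ObjHeaderStub { intptr_t size; };

    // Range-pruned search: skip spans the predicate rules out, then match
    // per object, mirroring FindObject's use of VisitRange.
    template <typename InRange, typename Match>
    ObjHeaderStub* FindInRegion(uintptr_t first, uintptr_t top,
                                InRange in_range, Match match) {
      if (!in_range(first, top)) return nullptr;  // Whole space ruled out.
      for (uintptr_t cur = first; cur < top;) {
        ObjHeaderStub* obj = reinterpret_cast<ObjHeaderStub*>(cur);
        uintptr_t next = cur + obj->size;
        if (in_range(cur, next) && match(obj)) return obj;
        cur = next;
      }
      return nullptr;
    }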
912 // We need a safepoint here to prevent allocation right before or right after | 943 // We need a safepoint here to prevent allocation right before or right after |
913 // the scavenge. | 944 // the scavenge. |
914 // The former can introduce an object that we might fail to collect. | 945 // The former can introduce an object that we might fail to collect. |
915 // The latter means even if the scavenge promotes every object in the new | 946 // The latter means even if the scavenge promotes every object in the new |
916 // space, the new allocation means the space is not empty, | 947 // space, the new allocation means the space is not empty, |
917 // causing the assertion below to fail. | 948 // causing the assertion below to fail. |
918 SafepointOperationScope scope(Thread::Current()); | 949 SafepointOperationScope scope(Thread::Current()); |
919 | 950 |
920 // Forces the next scavenge to promote all the objects in the new space. | 951 // Forces the next scavenge to promote all the objects in the new space. |
921 survivor_end_ = top_; | 952 survivor_end_ = top_; |
| 953 |
| 954 if (heap_->isolate()->IsMutatorThreadScheduled()) { |
| 955 Thread* mutator_thread = heap_->isolate()->mutator_thread(); |
| 956 survivor_end_ = mutator_thread->top(); |
| 957 } |
| 958 |
922 Scavenge(); | 959 Scavenge(); |
923 | 960 |
924 // It is possible for objects to stay in the new space | 961 // It is possible for objects to stay in the new space |
925 // if the VM cannot create more pages for these objects. | 962 // if the VM cannot create more pages for these objects. |
926 ASSERT((UsedInWords() == 0) || failed_to_promote_); | 963 ASSERT((UsedInWords() == 0) || failed_to_promote_); |
927 } | 964 } |
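Setting survivor_end_ to the allocation top makes every new-space object look like a survivor, which is what forces the next scavenge to promote them all. The extra TLS read added here is needed because the scavenger's own top_ may lag behind the mutator's thread-local copy. A small sketch of choosing the authoritative top (stub type as above, not the VM's Thread):

    #include <cstdint>

    struct MutatorStub { uintptr_t tls_top; };

    // The TLS copy wins whenever a mutator thread is scheduled.
    uintptr_t AuthoritativeTop(uintptr_t scavenger_top, const MutatorStub* m) {
      return (m != nullptr) ? m->tls_top : scavenger_top;
    }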
928 | 965 |
929 } // namespace dart | 966 } // namespace dart |