OLD | NEW |
---|---|
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <map> | 8 #include <map> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 55 matching lines...)
66 | 66 |
67 | 67 |
68 class ScavengerVisitor : public ObjectPointerVisitor { | 68 class ScavengerVisitor : public ObjectPointerVisitor { |
69 public: | 69 public: |
70 explicit ScavengerVisitor(Isolate* isolate, Scavenger* scavenger) | 70 explicit ScavengerVisitor(Isolate* isolate, Scavenger* scavenger) |
71 : ObjectPointerVisitor(isolate), | 71 : ObjectPointerVisitor(isolate), |
72 scavenger_(scavenger), | 72 scavenger_(scavenger), |
73 heap_(scavenger->heap_), | 73 heap_(scavenger->heap_), |
74 vm_heap_(Dart::vm_isolate()->heap()), | 74 vm_heap_(Dart::vm_isolate()->heap()), |
75 delayed_weak_stack_(), | 75 delayed_weak_stack_(), |
76 growth_policy_(PageSpace::kControlGrowth), | |
77 bytes_promoted_(0), | |
76 visiting_old_pointers_(false), | 78 visiting_old_pointers_(false), |
77 in_scavenge_pointer_(false) {} | 79 in_scavenge_pointer_(false) {} |
78 | 80 |
79 void VisitPointers(RawObject** first, RawObject** last) { | 81 void VisitPointers(RawObject** first, RawObject** last) { |
80 for (RawObject** current = first; current <= last; current++) { | 82 for (RawObject** current = first; current <= last; current++) { |
81 ScavengePointer(current); | 83 ScavengePointer(current); |
82 } | 84 } |
83 } | 85 } |
84 | 86 |
85 GrowableArray<RawObject*>* DelayedWeakStack() { | 87 GrowableArray<RawObject*>* DelayedWeakStack() { |
(...skipping 14 matching lines...)
100 delay_set_.insert(std::make_pair(raw_key, raw_weak)); | 102 delay_set_.insert(std::make_pair(raw_key, raw_weak)); |
101 } | 103 } |
102 | 104 |
103 void Finalize() { | 105 void Finalize() { |
104 DelaySet::iterator it = delay_set_.begin(); | 106 DelaySet::iterator it = delay_set_.begin(); |
105 for (; it != delay_set_.end(); ++it) { | 107 for (; it != delay_set_.end(); ++it) { |
106 WeakProperty::Clear(it->second); | 108 WeakProperty::Clear(it->second); |
107 } | 109 } |
108 } | 110 } |
109 | 111 |
112 intptr_t bytes_promoted() { return bytes_promoted_; } | |
113 | |
110 private: | 114 private: |
111 void UpdateStoreBuffer(RawObject** p, RawObject* obj) { | 115 void UpdateStoreBuffer(RawObject** p, RawObject* obj) { |
112 uword ptr = reinterpret_cast<uword>(p); | 116 uword ptr = reinterpret_cast<uword>(p); |
113 ASSERT(obj->IsHeapObject()); | 117 ASSERT(obj->IsHeapObject()); |
114 ASSERT(!scavenger_->Contains(ptr)); | 118 ASSERT(!scavenger_->Contains(ptr)); |
115 ASSERT(!heap_->CodeContains(ptr)); | 119 ASSERT(!heap_->CodeContains(ptr)); |
116 ASSERT(heap_->Contains(ptr)); | 120 ASSERT(heap_->Contains(ptr)); |
117 // If the newly written object is not a new object, drop it immediately. | 121 // If the newly written object is not a new object, drop it immediately. |
118 if (!obj->IsNewObject()) return; | 122 if (!obj->IsNewObject()) return; |
119 isolate()->store_buffer()->AddPointer(ptr); | 123 isolate()->store_buffer()->AddPointer(ptr); |
(...skipping 48 matching lines...)
168 // Not a survivor of a previous scavenge. Just copy the object into the | 172 // Not a survivor of a previous scavenge. Just copy the object into the |
169 // to space. | 173 // to space. |
170 new_addr = scavenger_->TryAllocate(size); | 174 new_addr = scavenger_->TryAllocate(size); |
171 } else { | 175 } else { |
172 // TODO(iposva): Experiment with less aggressive promotion. For example | 176 // TODO(iposva): Experiment with less aggressive promotion. For example |
173 // a coin toss determines if an object is promoted or whether it should | 177 // a coin toss determines if an object is promoted or whether it should |
174 // survive in this generation. | 178 // survive in this generation. |
175 // | 179 // |
176 // This object is a survivor of a previous scavenge. Attempt to promote | 180 // This object is a survivor of a previous scavenge. Attempt to promote |
177 // the object. | 181 // the object. |
178 new_addr = heap_->TryAllocate(size, Heap::kOld); | 182 new_addr = heap_->old_space()->TryAllocate(size, |
183 HeapPage::kData, | |
184 growth_policy_); | |
179 if (new_addr != 0) { | 185 if (new_addr != 0) { |
180 // If promotion succeeded then we need to remember it so that it can | 186 // If promotion succeeded then we need to remember it so that it can |
181 // be traversed later. | 187 // be traversed later. |
188 bytes_promoted_ += size; | |
Ivan Posva, 2012/11/14 21:07:18: Comment above has been separated from the code it…
cshapiro, 2012/11/14 22:47:36: I do not follow. Do you want me to transpose line…
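One possible reading of the truncated note above: the "If promotion succeeded..." comment describes PushToPromotedStack, but the inserted bytes_promoted_ accounting now sits between the two. A transposition along these lines would keep them adjacent (an illustration of the assumed intent only, not part of the patch):

    if (new_addr != 0) {
      bytes_promoted_ += size;
      // If promotion succeeded then we need to remember it so that it can
      // be traversed later.
      scavenger_->PushToPromotedStack(new_addr);
    }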
182 scavenger_->PushToPromotedStack(new_addr); | 189 scavenger_->PushToPromotedStack(new_addr); |
190 } else if (!scavenger_->had_promotion_failure_) { | |
191 // Retry. | |
Ivan Posva, 2012/11/14 21:07:18: Please expand on the "Retry." comment by briefly e…
192 scavenger_->had_promotion_failure_ = true; | |
193 growth_policy_ = PageSpace::kForceGrowth; | |
194 new_addr = heap_->old_space()->TryAllocate(size, | |
195 HeapPage::kData, | |
196 growth_policy_); | |
197 if (new_addr != 0) { | |
198 bytes_promoted_ += size; | |
199 scavenger_->PushToPromotedStack(new_addr); | |
200 } | |
Ivan Posva, 2012/11/14 21:07:18: What if new_addr is 0?
cshapiro, 2012/11/14 22:47:36: Same thing that always happens. We trip the asser…
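For reference, a condensed restatement of this hunk's promotion logic with the two points raised above (the "Retry." comment and the new_addr == 0 case) spelled out in comments; this is a sketch based on the diff, not a proposed change:

    // Sketch: the promotion hunk restated; growth_policy_ starts out as
    // PageSpace::kControlGrowth.
    new_addr = heap_->old_space()->TryAllocate(size,
                                               HeapPage::kData,
                                               growth_policy_);
    if (new_addr != 0) {
      // Promotion succeeded on the first attempt.
      bytes_promoted_ += size;
      scavenger_->PushToPromotedStack(new_addr);
    } else if (!scavenger_->had_promotion_failure_) {
      // First promotion failure in this scavenge: force the old space to
      // grow and retry the allocation once.
      scavenger_->had_promotion_failure_ = true;
      growth_policy_ = PageSpace::kForceGrowth;
      new_addr = heap_->old_space()->TryAllocate(size,
                                                 HeapPage::kData,
                                                 growth_policy_);
      if (new_addr != 0) {
        bytes_promoted_ += size;
        scavenger_->PushToPromotedStack(new_addr);
      }
      // If even the forced-growth retry returns 0, new_addr stays 0 and the
      // ASSERT(new_addr != 0) below fires -- the case asked about above.
    } else {
      // A promotion already failed earlier in this scavenge; later
      // survivors are copied into to-space instead of being promoted.
      ASSERT(growth_policy_ == PageSpace::kForceGrowth);
      new_addr = scavenger_->TryAllocate(size);
    }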
183 } else { | 201 } else { |
202 ASSERT(growth_policy_ == PageSpace::kForceGrowth); | |
184 // Promotion did not succeed. Copy into the to space instead. | 203 // Promotion did not succeed. Copy into the to space instead. |
185 scavenger_->had_promotion_failure_ = true; | |
186 new_addr = scavenger_->TryAllocate(size); | 204 new_addr = scavenger_->TryAllocate(size); |
187 } | 205 } |
188 } | 206 } |
189 // During a scavenge we always succeed to at least copy all of the | 207 // During a scavenge we always succeed to at least copy all of the |
190 // current objects to the to space. | 208 // current objects to the to space. |
191 ASSERT(new_addr != 0); | 209 ASSERT(new_addr != 0); |
192 // Copy the object to the new location. | 210 // Copy the object to the new location. |
193 memmove(reinterpret_cast<void*>(new_addr), | 211 memmove(reinterpret_cast<void*>(new_addr), |
194 reinterpret_cast<void*>(raw_addr), | 212 reinterpret_cast<void*>(raw_addr), |
195 size); | 213 size); |
196 // Remember forwarding address. | 214 // Remember forwarding address. |
197 ForwardTo(raw_addr, new_addr); | 215 ForwardTo(raw_addr, new_addr); |
198 } | 216 } |
199 // Update the reference. | 217 // Update the reference. |
200 RawObject* new_obj = RawObject::FromAddr(new_addr); | 218 RawObject* new_obj = RawObject::FromAddr(new_addr); |
201 *p = new_obj; | 219 *p = new_obj; |
202 // Update the store buffer as needed. | 220 // Update the store buffer as needed. |
203 if (visiting_old_pointers_) { | 221 if (visiting_old_pointers_) { |
204 UpdateStoreBuffer(p, new_obj); | 222 UpdateStoreBuffer(p, new_obj); |
205 } | 223 } |
206 } | 224 } |
207 | 225 |
208 Scavenger* scavenger_; | 226 Scavenger* scavenger_; |
209 Heap* heap_; | 227 Heap* heap_; |
210 Heap* vm_heap_; | 228 Heap* vm_heap_; |
211 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; | 229 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; |
212 DelaySet delay_set_; | 230 DelaySet delay_set_; |
213 GrowableArray<RawObject*> delayed_weak_stack_; | 231 GrowableArray<RawObject*> delayed_weak_stack_; |
232 PageSpace::GrowthPolicy growth_policy_; | |
233 intptr_t bytes_promoted_; | |
214 | 234 |
215 bool visiting_old_pointers_; | 235 bool visiting_old_pointers_; |
216 bool in_scavenge_pointer_; | 236 bool in_scavenge_pointer_; |
217 | 237 |
218 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); | 238 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); |
219 }; | 239 }; |
220 | 240 |
221 | 241 |
222 class ScavengerWeakVisitor : public HandleVisitor { | 242 class ScavengerWeakVisitor : public HandleVisitor { |
223 public: | 243 public: |
(...skipping 384 matching lines...)
608 OS::PrintErr("Verifying before Scavenge..."); | 628 OS::PrintErr("Verifying before Scavenge..."); |
609 heap_->Verify(); | 629 heap_->Verify(); |
610 OS::PrintErr(" done.\n"); | 630 OS::PrintErr(" done.\n"); |
611 } | 631 } |
612 | 632 |
613 if (FLAG_verbose_gc) { | 633 if (FLAG_verbose_gc) { |
614 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); | 634 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); |
615 } | 635 } |
616 Timer timer(FLAG_verbose_gc, "Scavenge"); | 636 Timer timer(FLAG_verbose_gc, "Scavenge"); |
617 timer.Start(); | 637 timer.Start(); |
638 | |
639 intptr_t in_use_before = in_use(); | |
640 | |
618 // Setup the visitor and run a scavenge. | 641 // Setup the visitor and run a scavenge. |
619 ScavengerVisitor visitor(isolate, this); | 642 ScavengerVisitor visitor(isolate, this); |
620 Prologue(isolate, invoke_api_callbacks); | 643 Prologue(isolate, invoke_api_callbacks); |
621 IterateRoots(isolate, &visitor, !invoke_api_callbacks); | 644 IterateRoots(isolate, &visitor, !invoke_api_callbacks); |
622 ProcessToSpace(&visitor); | 645 ProcessToSpace(&visitor); |
623 IterateWeakReferences(isolate, &visitor); | 646 IterateWeakReferences(isolate, &visitor); |
624 ScavengerWeakVisitor weak_visitor(this); | 647 ScavengerWeakVisitor weak_visitor(this); |
625 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); | 648 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); |
626 visitor.Finalize(); | 649 visitor.Finalize(); |
627 ProcessPeerReferents(); | 650 ProcessPeerReferents(); |
628 Epilogue(isolate, invoke_api_callbacks); | 651 Epilogue(isolate, invoke_api_callbacks); |
629 timer.Stop(); | 652 timer.Stop(); |
653 | |
630 if (FLAG_verbose_gc) { | 654 if (FLAG_verbose_gc) { |
631 OS::PrintErr("Scavenge[%d]: %"Pd64"us\n", | 655 const intptr_t KB2 = KB / 2; |
656 OS::PrintErr("Scavenge[%d]: %"Pd64"us (%"Pd"K -> %"Pd"K, %"Pd"K)\n" | |
657 "Promoted %"Pd"K\n", | |
632 count_, | 658 count_, |
633 timer.TotalElapsedTime()); | 659 timer.TotalElapsedTime(), |
660 (in_use_before + KB2) / KB, | |
661 (in_use() + KB2) / KB, | |
662 (capacity() + KB2) / KB, | |
663 (visitor.bytes_promoted() + KB2) / KB); | |
634 } | 664 } |
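The sizes in the new log line are rounded to the nearest kilobyte by adding KB2 (half a kilobyte) before the integer division. A standalone illustration of that idiom (RoundToKB is a made-up name for this note, not part of the patch):

    #include <cstdio>

    // Round a byte count to the nearest whole kilobyte, mirroring the
    // (value + KB2) / KB expressions in the log statement above.
    static long RoundToKB(long bytes) {
      const long KB = 1024;
      const long KB2 = KB / 2;
      return (bytes + KB2) / KB;
    }

    int main() {
      std::printf("%ld\n", RoundToKB(1536));  // prints 2 (rounds up)
      std::printf("%ld\n", RoundToKB(1023));  // prints 1 (rounds down)
      return 0;
    }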
635 | 665 |
636 if (FLAG_verify_after_gc) { | 666 if (FLAG_verify_after_gc) { |
637 OS::PrintErr("Verifying after Scavenge..."); | 667 OS::PrintErr("Verifying after Scavenge..."); |
638 heap_->Verify(); | 668 heap_->Verify(); |
639 OS::PrintErr(" done.\n"); | 669 OS::PrintErr(" done.\n"); |
640 } | 670 } |
641 | 671 |
642 count_++; | 672 count_++; |
643 // Done scavenging. Reset the marker. | 673 // Done scavenging. Reset the marker. |
(...skipping 21 matching lines...)
665 PeerTable::iterator it = peer_table_.find(raw_obj); | 695 PeerTable::iterator it = peer_table_.find(raw_obj); |
666 return (it == peer_table_.end()) ? NULL : it->second; | 696 return (it == peer_table_.end()) ? NULL : it->second; |
667 } | 697 } |
668 | 698 |
669 | 699 |
670 int64_t Scavenger::PeerCount() const { | 700 int64_t Scavenger::PeerCount() const { |
671 return static_cast<int64_t>(peer_table_.size()); | 701 return static_cast<int64_t>(peer_table_.size()); |
672 } | 702 } |
673 | 703 |
674 } // namespace dart | 704 } // namespace dart |