OLD | NEW |
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <map> | 8 #include <map> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 55 matching lines...)
66 | 66 |
67 | 67 |
68 class ScavengerVisitor : public ObjectPointerVisitor { | 68 class ScavengerVisitor : public ObjectPointerVisitor { |
69 public: | 69 public: |
70 explicit ScavengerVisitor(Isolate* isolate, Scavenger* scavenger) | 70 explicit ScavengerVisitor(Isolate* isolate, Scavenger* scavenger) |
71 : ObjectPointerVisitor(isolate), | 71 : ObjectPointerVisitor(isolate), |
72 scavenger_(scavenger), | 72 scavenger_(scavenger), |
73 heap_(scavenger->heap_), | 73 heap_(scavenger->heap_), |
74 vm_heap_(Dart::vm_isolate()->heap()), | 74 vm_heap_(Dart::vm_isolate()->heap()), |
75 delayed_weak_stack_(), | 75 delayed_weak_stack_(), |
| 76 growth_policy_(PageSpace::kControlGrowth), |
| 77 bytes_promoted_(0), |
76 visiting_old_pointers_(false), | 78 visiting_old_pointers_(false), |
77 in_scavenge_pointer_(false) {} | 79 in_scavenge_pointer_(false) {} |
78 | 80 |
79 void VisitPointers(RawObject** first, RawObject** last) { | 81 void VisitPointers(RawObject** first, RawObject** last) { |
80 for (RawObject** current = first; current <= last; current++) { | 82 for (RawObject** current = first; current <= last; current++) { |
81 ScavengePointer(current); | 83 ScavengePointer(current); |
82 } | 84 } |
83 } | 85 } |
84 | 86 |
85 GrowableArray<RawObject*>* DelayedWeakStack() { | 87 GrowableArray<RawObject*>* DelayedWeakStack() { |
(...skipping 14 matching lines...)
100 delay_set_.insert(std::make_pair(raw_key, raw_weak)); | 102 delay_set_.insert(std::make_pair(raw_key, raw_weak)); |
101 } | 103 } |
102 | 104 |
103 void Finalize() { | 105 void Finalize() { |
104 DelaySet::iterator it = delay_set_.begin(); | 106 DelaySet::iterator it = delay_set_.begin(); |
105 for (; it != delay_set_.end(); ++it) { | 107 for (; it != delay_set_.end(); ++it) { |
106 WeakProperty::Clear(it->second); | 108 WeakProperty::Clear(it->second); |
107 } | 109 } |
108 } | 110 } |
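
    The delay_set_ above implements the usual ephemeron treatment: a weak property whose key has not been
    scavenged yet is parked in a multimap under that key, and Finalize() clears whatever is still parked,
    since those keys did not survive the scavenge. A hedged sketch of that delay-then-clear pattern with
    placeholder Key/WeakProp types (not the VM's RawObject/RawWeakProperty classes); the KeySurvived step is
    an assumption about the elided ProcessWeakProperty code.

    // Sketch only: weak properties are parked under their (not yet scavenged)
    // key, re-processed if the key later survives, and cleared in bulk at the
    // end if it does not. Types and method names here are placeholders.
    #include <map>
    #include <vector>

    struct Key {};                              // stands in for RawObject*
    struct WeakProp { bool cleared = false; };  // stands in for RawWeakProperty*

    class DelayedWeakProperties {
     public:
      void Delay(Key* key, WeakProp* prop) { delay_set_.insert({key, prop}); }

      // Assumed behavior: when `key` is discovered to be live, move its parked
      // properties back onto a work list and drop them from the delay set.
      void KeySurvived(Key* key, std::vector<WeakProp*>* worklist) {
        auto range = delay_set_.equal_range(key);
        for (auto it = range.first; it != range.second; ++it) {
          worklist->push_back(it->second);
        }
        delay_set_.erase(range.first, range.second);
      }

      // Anything still delayed at the end has a dead key: clear it.
      void Finalize() {
        for (auto& entry : delay_set_) entry.second->cleared = true;
        delay_set_.clear();
      }

     private:
      std::multimap<Key*, WeakProp*> delay_set_;
    };
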
109 | 111 |
| 112 intptr_t bytes_promoted() { return bytes_promoted_; } |
| 113 |
110 private: | 114 private: |
111 void UpdateStoreBuffer(RawObject** p, RawObject* obj) { | 115 void UpdateStoreBuffer(RawObject** p, RawObject* obj) { |
112 uword ptr = reinterpret_cast<uword>(p); | 116 uword ptr = reinterpret_cast<uword>(p); |
113 ASSERT(obj->IsHeapObject()); | 117 ASSERT(obj->IsHeapObject()); |
114 ASSERT(!scavenger_->Contains(ptr)); | 118 ASSERT(!scavenger_->Contains(ptr)); |
115 ASSERT(!heap_->CodeContains(ptr)); | 119 ASSERT(!heap_->CodeContains(ptr)); |
116 ASSERT(heap_->Contains(ptr)); | 120 ASSERT(heap_->Contains(ptr)); |
117 // If the newly written object is not a new object, drop it immediately. | 121 // If the newly written object is not a new object, drop it immediately. |
118 if (!obj->IsNewObject()) return; | 122 if (!obj->IsNewObject()) return; |
119 isolate()->store_buffer()->AddPointer(ptr); | 123 isolate()->store_buffer()->AddPointer(ptr); |
(...skipping 48 matching lines...)
168 // Not a survivor of a previous scavenge. Just copy the object into the | 172 // Not a survivor of a previous scavenge. Just copy the object into the |
169 // to space. | 173 // to space. |
170 new_addr = scavenger_->TryAllocate(size); | 174 new_addr = scavenger_->TryAllocate(size); |
171 } else { | 175 } else { |
172 // TODO(iposva): Experiment with less aggressive promotion. For example | 176 // TODO(iposva): Experiment with less aggressive promotion. For example |
173 // a coin toss determines if an object is promoted or whether it should | 177 // a coin toss determines if an object is promoted or whether it should |
174 // survive in this generation. | 178 // survive in this generation. |
175 // | 179 // |
176 // This object is a survivor of a previous scavenge. Attempt to promote | 180 // This object is a survivor of a previous scavenge. Attempt to promote |
177 // the object. | 181 // the object. |
178 new_addr = heap_->TryAllocate(size, Heap::kOld); | 182 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
179 if (new_addr != 0) { | 183 if (new_addr != 0) { |
180 // If promotion succeeded then we need to remember it so that it can | 184 // If promotion succeeded then we need to remember it so that it can |
181 // be traversed later. | 185 // be traversed later. |
182 scavenger_->PushToPromotedStack(new_addr); | 186 scavenger_->PushToPromotedStack(new_addr); |
| 187 bytes_promoted_ += size; |
| 188 } else if (!scavenger_->had_promotion_failure_) { |
| 189 // Signal a promotion failure and set the growth policy for |
| 190 // this, and all subsequent promotion allocations, to force |
| 191 // growth. |
| 192 scavenger_->had_promotion_failure_ = true; |
| 193 growth_policy_ = PageSpace::kForceGrowth; |
| 194 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
| 195 if (new_addr != 0) { |
| 196 scavenger_->PushToPromotedStack(new_addr); |
| 197 bytes_promoted_ += size; |
| 198 } else { |
| 199 // Promotion did not succeed. Copy into the to space |
| 200 // instead. |
| 201 new_addr = scavenger_->TryAllocate(size); |
| 202 } |
183 } else { | 203 } else { |
| 204 ASSERT(growth_policy_ == PageSpace::kForceGrowth); |
184 // Promotion did not succeed. Copy into the to space instead. | 205 // Promotion did not succeed. Copy into the to space instead. |
185 scavenger_->had_promotion_failure_ = true; | |
186 new_addr = scavenger_->TryAllocate(size); | 206 new_addr = scavenger_->TryAllocate(size); |
187 } | 207 } |
188 } | 208 } |
189 // During a scavenge we always succeed to at least copy all of the | 209 // During a scavenge we always succeed to at least copy all of the |
190 // current objects to the to space. | 210 // current objects to the to space. |
191 ASSERT(new_addr != 0); | 211 ASSERT(new_addr != 0); |
192 // Copy the object to the new location. | 212 // Copy the object to the new location. |
193 memmove(reinterpret_cast<void*>(new_addr), | 213 memmove(reinterpret_cast<void*>(new_addr), |
194 reinterpret_cast<void*>(raw_addr), | 214 reinterpret_cast<void*>(raw_addr), |
195 size); | 215 size); |
196 // Remember forwarding address. | 216 // Remember forwarding address. |
197 ForwardTo(raw_addr, new_addr); | 217 ForwardTo(raw_addr, new_addr); |
198 } | 218 } |
199 // Update the reference. | 219 // Update the reference. |
200 RawObject* new_obj = RawObject::FromAddr(new_addr); | 220 RawObject* new_obj = RawObject::FromAddr(new_addr); |
201 *p = new_obj; | 221 *p = new_obj; |
202 // Update the store buffer as needed. | 222 // Update the store buffer as needed. |
203 if (visiting_old_pointers_) { | 223 if (visiting_old_pointers_) { |
204 UpdateStoreBuffer(p, new_obj); | 224 UpdateStoreBuffer(p, new_obj); |
205 } | 225 } |
206 } | 226 } |
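
    The promotion path in the new code first tries an old-space allocation under the controlled growth
    policy; on the first failure it flips the visitor to kForceGrowth for this and every later promotion,
    retries, and only falls back to a to-space copy when even forced growth cannot allocate. A minimal
    standalone sketch of that control flow, with a stub allocator and hypothetical names (OldSpace,
    TryPromote) that are not the VM's API.

    // Sketch of the promotion fallback used by ScavengePointer above.
    #include <cstdint>
    #include <cstdio>

    enum class GrowthPolicy { kControlGrowth, kForceGrowth };

    // Stub old-space allocator: controlled growth fails once the free budget
    // is exhausted, forced growth always succeeds (dummy non-zero address).
    struct OldSpace {
      intptr_t free_bytes;
      uintptr_t TryAllocate(intptr_t size, GrowthPolicy policy) {
        if (policy == GrowthPolicy::kControlGrowth && size > free_bytes) return 0;
        free_bytes -= size;
        return 0x1000;
      }
    };

    struct PromotionState {
      GrowthPolicy policy = GrowthPolicy::kControlGrowth;
      bool had_failure = false;
      intptr_t bytes_promoted = 0;
    };

    // Returns an old-space address, or 0 if the object must stay in to-space.
    uintptr_t TryPromote(OldSpace* old_space, PromotionState* state, intptr_t size) {
      uintptr_t addr = old_space->TryAllocate(size, state->policy);
      if (addr == 0 && !state->had_failure) {
        // First failure: force growth for this and all later promotions.
        state->had_failure = true;
        state->policy = GrowthPolicy::kForceGrowth;
        addr = old_space->TryAllocate(size, state->policy);
      }
      if (addr != 0) state->bytes_promoted += size;
      return addr;  // Caller copies into to-space when this is 0.
    }

    int main() {
      OldSpace old_space{128};
      PromotionState state;
      TryPromote(&old_space, &state, 64);   // fits under controlled growth
      TryPromote(&old_space, &state, 256);  // fails once, then forced growth
      std::printf("promoted %ld bytes\n", static_cast<long>(state.bytes_promoted));
      return 0;
    }
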
207 | 227 |
208 Scavenger* scavenger_; | 228 Scavenger* scavenger_; |
209 Heap* heap_; | 229 Heap* heap_; |
210 Heap* vm_heap_; | 230 Heap* vm_heap_; |
211 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; | 231 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; |
212 DelaySet delay_set_; | 232 DelaySet delay_set_; |
213 GrowableArray<RawObject*> delayed_weak_stack_; | 233 GrowableArray<RawObject*> delayed_weak_stack_; |
| 234 PageSpace::GrowthPolicy growth_policy_; |
| 235 intptr_t bytes_promoted_; |
214 | 236 |
215 bool visiting_old_pointers_; | 237 bool visiting_old_pointers_; |
216 bool in_scavenge_pointer_; | 238 bool in_scavenge_pointer_; |
217 | 239 |
218 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); | 240 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); |
219 }; | 241 }; |
220 | 242 |
221 | 243 |
222 class ScavengerWeakVisitor : public HandleVisitor { | 244 class ScavengerWeakVisitor : public HandleVisitor { |
223 public: | 245 public: |
(...skipping 384 matching lines...)
608 OS::PrintErr("Verifying before Scavenge..."); | 630 OS::PrintErr("Verifying before Scavenge..."); |
609 heap_->Verify(); | 631 heap_->Verify(); |
610 OS::PrintErr(" done.\n"); | 632 OS::PrintErr(" done.\n"); |
611 } | 633 } |
612 | 634 |
613 if (FLAG_verbose_gc) { | 635 if (FLAG_verbose_gc) { |
614 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); | 636 OS::PrintErr("Start scavenge for %s collection\n", gc_reason); |
615 } | 637 } |
616 Timer timer(FLAG_verbose_gc, "Scavenge"); | 638 Timer timer(FLAG_verbose_gc, "Scavenge"); |
617 timer.Start(); | 639 timer.Start(); |
| 640 |
| 641 intptr_t in_use_before = in_use(); |
| 642 |
618 // Setup the visitor and run a scavenge. | 643 // Setup the visitor and run a scavenge. |
619 ScavengerVisitor visitor(isolate, this); | 644 ScavengerVisitor visitor(isolate, this); |
620 Prologue(isolate, invoke_api_callbacks); | 645 Prologue(isolate, invoke_api_callbacks); |
621 IterateRoots(isolate, &visitor, !invoke_api_callbacks); | 646 IterateRoots(isolate, &visitor, !invoke_api_callbacks); |
622 ProcessToSpace(&visitor); | 647 ProcessToSpace(&visitor); |
623 IterateWeakReferences(isolate, &visitor); | 648 IterateWeakReferences(isolate, &visitor); |
624 ScavengerWeakVisitor weak_visitor(this); | 649 ScavengerWeakVisitor weak_visitor(this); |
625 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); | 650 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); |
626 visitor.Finalize(); | 651 visitor.Finalize(); |
627 ProcessPeerReferents(); | 652 ProcessPeerReferents(); |
628 Epilogue(isolate, invoke_api_callbacks); | 653 Epilogue(isolate, invoke_api_callbacks); |
629 timer.Stop(); | 654 timer.Stop(); |
| 655 |
630 if (FLAG_verbose_gc) { | 656 if (FLAG_verbose_gc) { |
631 OS::PrintErr("Scavenge[%d]: %"Pd64"us\n", | 657 const intptr_t KB2 = KB / 2; |
| 658 OS::PrintErr("Scavenge[%d]: %"Pd64"us (%"Pd"K -> %"Pd"K, %"Pd"K)\n" |
| 659 "Promoted %"Pd"K\n", |
632 count_, | 660 count_, |
633 timer.TotalElapsedTime()); | 661 timer.TotalElapsedTime(), |
| 662 (in_use_before + KB2) / KB, |
| 663 (in_use() + KB2) / KB, |
| 664 (capacity() + KB2) / KB, |
| 665 (visitor.bytes_promoted() + KB2) / KB); |
634 } | 666 } |
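
    The sizes printed above are rounded to the nearest kilobyte by adding KB/2 before the integer
    division. A small illustration of that arithmetic, assuming KB is 1024 as the VM's globals define it.

    // Round-to-nearest-KB, matching the (x + KB2) / KB expressions above.
    #include <cstdint>
    #include <cstdio>

    const intptr_t KB = 1024;  // assumed value, mirroring the VM's constant

    intptr_t RoundToKB(intptr_t bytes) { return (bytes + KB / 2) / KB; }

    int main() {
      std::printf("%ld\n", static_cast<long>(RoundToKB(1536)));  // 2 (rounds up)
      std::printf("%ld\n", static_cast<long>(RoundToKB(1400)));  // 1 (rounds down)
      return 0;
    }
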
635 | 667 |
636 if (FLAG_verify_after_gc) { | 668 if (FLAG_verify_after_gc) { |
637 OS::PrintErr("Verifying after Scavenge..."); | 669 OS::PrintErr("Verifying after Scavenge..."); |
638 heap_->Verify(); | 670 heap_->Verify(); |
639 OS::PrintErr(" done.\n"); | 671 OS::PrintErr(" done.\n"); |
640 } | 672 } |
641 | 673 |
642 count_++; | 674 count_++; |
643 // Done scavenging. Reset the marker. | 675 // Done scavenging. Reset the marker. |
(...skipping 21 matching lines...)
665 PeerTable::iterator it = peer_table_.find(raw_obj); | 697 PeerTable::iterator it = peer_table_.find(raw_obj); |
666 return (it == peer_table_.end()) ? NULL : it->second; | 698 return (it == peer_table_.end()) ? NULL : it->second; |
667 } | 699 } |
668 | 700 |
669 | 701 |
670 int64_t Scavenger::PeerCount() const { | 702 int64_t Scavenger::PeerCount() const { |
671 return static_cast<int64_t>(peer_table_.size()); | 703 return static_cast<int64_t>(peer_table_.size()); |
672 } | 704 } |
673 | 705 |
674 } // namespace dart | 706 } // namespace dart |