OLD | NEW |
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <map> | 8 #include <map> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 160 matching lines...)
171 delayed_weak_stack_.Add(it->second); | 171 delayed_weak_stack_.Add(it->second); |
172 } | 172 } |
173 delay_set_.erase(ret.first, ret.second); | 173 delay_set_.erase(ret.first, ret.second); |
174 } | 174 } |
175 intptr_t size = raw_obj->Size(); | 175 intptr_t size = raw_obj->Size(); |
176 // Check whether object should be promoted. | 176 // Check whether object should be promoted. |
177 if (scavenger_->survivor_end_ <= raw_addr) { | 177 if (scavenger_->survivor_end_ <= raw_addr) { |
178 // Not a survivor of a previous scavenge. Just copy the object into the | 178 // Not a survivor of a previous scavenge. Just copy the object into the |
179 // to space. | 179 // to space. |
180 new_addr = scavenger_->TryAllocate(size); | 180 new_addr = scavenger_->TryAllocate(size); |
181 if (HeapTrace::is_enabled()) { | |
182 heap_->trace()->TraceCopy(raw_addr, new_addr); | |
183 } | |
184 } else { | 181 } else { |
185 // TODO(iposva): Experiment with less aggressive promotion. For example | 182 // TODO(iposva): Experiment with less aggressive promotion. For example |
186 // a coin toss determines if an object is promoted or whether it should | 183 // a coin toss determines if an object is promoted or whether it should |
187 // survive in this generation. | 184 // survive in this generation. |
188 // | 185 // |
189 // This object is a survivor of a previous scavenge. Attempt to promote | 186 // This object is a survivor of a previous scavenge. Attempt to promote |
190 // the object. | 187 // the object. |
191 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); | 188 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
192 if (new_addr != 0) { | 189 if (new_addr != 0) { |
193 // If promotion succeeded then we need to remember it so that it can | 190 // If promotion succeeded then we need to remember it so that it can |
194 // be traversed later. | 191 // be traversed later. |
195 scavenger_->PushToPromotedStack(new_addr); | 192 scavenger_->PushToPromotedStack(new_addr); |
196 bytes_promoted_ += size; | 193 bytes_promoted_ += size; |
197 if (HeapTrace::is_enabled()) { | |
198 heap_->trace()->TracePromotion(raw_addr, new_addr); | |
199 } | |
200 } else if (!scavenger_->had_promotion_failure_) { | 194 } else if (!scavenger_->had_promotion_failure_) { |
201 // Signal a promotion failure and set the growth policy for | 195 // Signal a promotion failure and set the growth policy for |
202 // this, and all subsequent promotion allocations, to force | 196 // this, and all subsequent promotion allocations, to force |
203 // growth. | 197 // growth. |
204 scavenger_->had_promotion_failure_ = true; | 198 scavenger_->had_promotion_failure_ = true; |
205 growth_policy_ = PageSpace::kForceGrowth; | 199 growth_policy_ = PageSpace::kForceGrowth; |
206 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); | 200 new_addr = heap_->TryAllocate(size, Heap::kOld, growth_policy_); |
207 if (new_addr != 0) { | 201 if (new_addr != 0) { |
208 scavenger_->PushToPromotedStack(new_addr); | 202 scavenger_->PushToPromotedStack(new_addr); |
209 bytes_promoted_ += size; | 203 bytes_promoted_ += size; |
210 if (HeapTrace::is_enabled()) { | |
211 heap_->trace()->TracePromotion(raw_addr, new_addr); | |
212 } | |
213 } else { | 204 } else { |
214 // Promotion did not succeed. Copy into the to space | 205 // Promotion did not succeed. Copy into the to space |
215 // instead. | 206 // instead. |
216 new_addr = scavenger_->TryAllocate(size); | 207 new_addr = scavenger_->TryAllocate(size); |
217 if (HeapTrace::is_enabled()) { | |
218 heap_->trace()->TraceCopy(raw_addr, new_addr); | |
219 } | |
220 } | 208 } |
221 } else { | 209 } else { |
222 ASSERT(growth_policy_ == PageSpace::kForceGrowth); | 210 ASSERT(growth_policy_ == PageSpace::kForceGrowth); |
223 // Promotion did not succeed. Copy into the to space instead. | 211 // Promotion did not succeed. Copy into the to space instead. |
224 new_addr = scavenger_->TryAllocate(size); | 212 new_addr = scavenger_->TryAllocate(size); |
225 if (HeapTrace::is_enabled()) { | |
226 heap_->trace()->TraceCopy(raw_addr, new_addr); | |
227 } | |
228 } | 213 } |
229 } | 214 } |
230 // During a scavenge we always succeed to at least copy all of the | 215 // During a scavenge we always succeed to at least copy all of the |
231 // current objects to the to space. | 216 // current objects to the to space. |
232 ASSERT(new_addr != 0); | 217 ASSERT(new_addr != 0); |
233 // Copy the object to the new location. | 218 // Copy the object to the new location. |
234 memmove(reinterpret_cast<void*>(new_addr), | 219 memmove(reinterpret_cast<void*>(new_addr), |
235 reinterpret_cast<void*>(raw_addr), | 220 reinterpret_cast<void*>(raw_addr), |
236 size); | 221 size); |
237 // Remember forwarding address. | 222 // Remember forwarding address. |
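
[Editor's note] The hunk above removes the HeapTrace hooks but leaves the copy-or-promote decision untouched. As a reading aid, the following is a minimal, compilable sketch of that decision under hypothetical names (Space, SketchScavenger, and the bump allocators are illustrative stand-ins, not the VM's API): an object at or above survivor_end_ was allocated after the last scavenge and stays in the young generation, an older survivor is promoted, and the first promotion failure forces old-space growth for this and all later promotions before falling back to a plain to-space copy.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

enum GrowthPolicy { kControlGrowth, kForceGrowth };

// Bump allocator standing in for a VM space. Returns 0 on failure, matching
// the TryAllocate convention in the hunk above. Under "force growth" the
// soft limit is ignored, standing in for the page space actually growing.
struct Space {
  Space(size_t soft_limit_in, size_t capacity)
      : soft_limit(soft_limit_in), buffer(capacity) {}
  uintptr_t TryAllocate(size_t size, bool force_growth = false) {
    size_t limit = force_growth ? buffer.size() : soft_limit;
    if (top + size > limit) return 0;
    uintptr_t result = reinterpret_cast<uintptr_t>(&buffer[top]);
    top += size;
    return result;
  }
  size_t soft_limit;
  std::vector<uint8_t> buffer;
  size_t top = 0;
};

struct SketchScavenger {
  Space to_space{64 * 1024, 64 * 1024};
  Space old_space{1024, 4 * 1024};  // headroom beyond the soft limit
  uintptr_t survivor_end = 0;  // survivors of the last scavenge sit below this
  bool had_promotion_failure = false;
  GrowthPolicy growth_policy = kControlGrowth;
  std::vector<uintptr_t> promoted_stack;
  size_t bytes_promoted = 0;

  uintptr_t CopyOrPromote(uintptr_t raw_addr, size_t size) {
    uintptr_t new_addr = 0;
    if (survivor_end <= raw_addr) {
      // Not a survivor of a previous scavenge: copy into to-space.
      new_addr = to_space.TryAllocate(size);
    } else {
      // A survivor: attempt to promote it into the old generation.
      new_addr = old_space.TryAllocate(size, growth_policy == kForceGrowth);
      if (new_addr == 0 && !had_promotion_failure) {
        // First failure this cycle: switch the growth policy for this and
        // all subsequent promotion allocations, then retry once.
        had_promotion_failure = true;
        growth_policy = kForceGrowth;
        new_addr = old_space.TryAllocate(size, /*force_growth=*/true);
      }
      if (new_addr != 0) {
        promoted_stack.push_back(new_addr);  // traversed later in the cycle
        bytes_promoted += size;
      } else {
        new_addr = to_space.TryAllocate(size);  // copy instead of promoting
      }
    }
    // A scavenge must at least succeed in copying every live object.
    assert(new_addr != 0);
    std::memmove(reinterpret_cast<void*>(new_addr),
                 reinterpret_cast<void*>(raw_addr), size);
    return new_addr;
  }
};

The sketch flattens the nested else-branches so the to-space fallback appears once; the original keeps each failure site separate, which is also where the deleted TraceCopy/TracePromotion calls sat, one per allocation outcome.
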
(...skipping 396 matching lines...)
634 had_promotion_failure_ = false; | 619 had_promotion_failure_ = false; |
635 Isolate* isolate = Isolate::Current(); | 620 Isolate* isolate = Isolate::Current(); |
636 NoHandleScope no_handles(isolate); | 621 NoHandleScope no_handles(isolate); |
637 | 622 |
638 if (FLAG_verify_before_gc) { | 623 if (FLAG_verify_before_gc) { |
639 OS::PrintErr("Verifying before Scavenge..."); | 624 OS::PrintErr("Verifying before Scavenge..."); |
640 heap_->Verify(); | 625 heap_->Verify(); |
641 OS::PrintErr(" done.\n"); | 626 OS::PrintErr(" done.\n"); |
642 } | 627 } |
643 | 628 |
644 uword prev_first_obj_start = FirstObjectStart(); | |
645 uword prev_top_addr = *(TopAddress()); | |
646 | |
647 // Setup the visitor and run a scavenge. | 629 // Setup the visitor and run a scavenge. |
648 ScavengerVisitor visitor(isolate, this); | 630 ScavengerVisitor visitor(isolate, this); |
649 Prologue(isolate, invoke_api_callbacks); | 631 Prologue(isolate, invoke_api_callbacks); |
650 IterateRoots(isolate, &visitor, !invoke_api_callbacks); | 632 IterateRoots(isolate, &visitor, !invoke_api_callbacks); |
651 int64_t start = OS::GetCurrentTimeMicros(); | 633 int64_t start = OS::GetCurrentTimeMicros(); |
652 ProcessToSpace(&visitor); | 634 ProcessToSpace(&visitor); |
653 int64_t middle = OS::GetCurrentTimeMicros(); | 635 int64_t middle = OS::GetCurrentTimeMicros(); |
654 IterateWeakReferences(isolate, &visitor); | 636 IterateWeakReferences(isolate, &visitor); |
655 ScavengerWeakVisitor weak_visitor(this); | 637 ScavengerWeakVisitor weak_visitor(this); |
656 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); | 638 IterateWeakRoots(isolate, &weak_visitor, invoke_api_callbacks); |
657 visitor.Finalize(); | 639 visitor.Finalize(); |
658 ProcessPeerReferents(); | 640 ProcessPeerReferents(); |
659 int64_t end = OS::GetCurrentTimeMicros(); | 641 int64_t end = OS::GetCurrentTimeMicros(); |
660 heap_->RecordTime(kProcessToSpace, middle - start); | 642 heap_->RecordTime(kProcessToSpace, middle - start); |
661 heap_->RecordTime(kIterateWeaks, end - middle); | 643 heap_->RecordTime(kIterateWeaks, end - middle); |
662 Epilogue(isolate, invoke_api_callbacks); | 644 Epilogue(isolate, invoke_api_callbacks); |
663 | 645 |
664 if (FLAG_verify_after_gc) { | 646 if (FLAG_verify_after_gc) { |
665 OS::PrintErr("Verifying after Scavenge..."); | 647 OS::PrintErr("Verifying after Scavenge..."); |
666 heap_->Verify(); | 648 heap_->Verify(); |
667 OS::PrintErr(" done.\n"); | 649 OS::PrintErr(" done.\n"); |
668 } | 650 } |
669 | 651 |
670 if (HeapTrace::is_enabled()) { | |
671 heap_->trace()->TraceDeathRange(prev_first_obj_start, prev_top_addr); | |
672 } | |
673 | |
674 // Done scavenging. Reset the marker. | 652 // Done scavenging. Reset the marker. |
675 ASSERT(scavenging_); | 653 ASSERT(scavenging_); |
676 scavenging_ = false; | 654 scavenging_ = false; |
677 } | 655 } |
678 | 656 |
679 | 657 |
680 void Scavenger::WriteProtect(bool read_only) { | 658 void Scavenger::WriteProtect(bool read_only) { |
681 space_->Protect( | 659 space_->Protect( |
682 read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite); | 660 read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite); |
683 } | 661 } |
(...skipping 12 matching lines...)
696 PeerTable::iterator it = peer_table_.find(raw_obj); | 674 PeerTable::iterator it = peer_table_.find(raw_obj); |
697 return (it == peer_table_.end()) ? NULL : it->second; | 675 return (it == peer_table_.end()) ? NULL : it->second; |
698 } | 676 } |
699 | 677 |
700 | 678 |
701 int64_t Scavenger::PeerCount() const { | 679 int64_t Scavenger::PeerCount() const { |
702 return static_cast<int64_t>(peer_table_.size()); | 680 return static_cast<int64_t>(peer_table_.size()); |
703 } | 681 } |
704 | 682 |
705 } // namespace dart | 683 } // namespace dart |
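
[Editor's note] For orientation, the Scavenge() driver in the second hunk relies on a fixed phase ordering: weak references are visited only after the strong closure over to-space is complete, so they observe final liveness, and the two passes are recorded as separate intervals via heap_->RecordTime(kProcessToSpace, ...) and heap_->RecordTime(kIterateWeaks, ...). The sketch below mirrors that ordering with hypothetical stand-in functions, not the VM's API.

#include <chrono>
#include <cstdio>

// Hypothetical stand-ins for the phases invoked by Scavenge() above.
static void IterateRoots() {}           // seed to-space from strong roots
static void ProcessToSpace() {}         // transitive closure over the copies
static void IterateWeakReferences() {}  // weak refs see final liveness
static void IterateWeakRoots() {}
static void ProcessPeerReferents() {}

static long long NowMicros() {
  using namespace std::chrono;
  return duration_cast<microseconds>(
      steady_clock::now().time_since_epoch()).count();
}

// Mirrors the measurement points in Scavenge(): the strong-pointer pass and
// the weak-reference pass are timed as two separate intervals.
void SketchScavenge() {
  IterateRoots();
  long long start = NowMicros();
  ProcessToSpace();
  long long middle = NowMicros();
  IterateWeakReferences();
  IterateWeakRoots();
  ProcessPeerReferents();
  long long end = NowMicros();
  std::printf("to-space: %lld us, weaks: %lld us\n",
              middle - start, end - middle);
}
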