| OLD | NEW |
| 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <map> | 8 #include <map> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 237 matching lines...) |
| 248 intptr_t bytes_promoted_; | 248 intptr_t bytes_promoted_; |
| 249 RawObject* visiting_old_object_; | 249 RawObject* visiting_old_object_; |
| 250 bool in_scavenge_pointer_; | 250 bool in_scavenge_pointer_; |
| 251 | 251 |
| 252 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); | 252 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); |
| 253 }; | 253 }; |
| 254 | 254 |
| 255 | 255 |
| 256 class ScavengerWeakVisitor : public HandleVisitor { | 256 class ScavengerWeakVisitor : public HandleVisitor { |
| 257 public: | 257 public: |
| 258 // 'prologue_weak_were_strong' is currently only used for sanity checking. | 258 explicit ScavengerWeakVisitor(Scavenger* scavenger) |
| 259 explicit ScavengerWeakVisitor(Scavenger* scavenger, | |
| 260 bool prologue_weak_were_strong) | |
| 261 : HandleVisitor(Thread::Current()), | 259 : HandleVisitor(Thread::Current()), |
| 262 scavenger_(scavenger), | 260 scavenger_(scavenger) { |
| 263 prologue_weak_were_strong_(prologue_weak_were_strong) { | |
| 264 ASSERT(scavenger->heap_->isolate() == Thread::Current()->isolate()); | 261 ASSERT(scavenger->heap_->isolate() == Thread::Current()->isolate()); |
| 265 } | 262 } |
| 266 | 263 |
| 267 void VisitHandle(uword addr) { | 264 void VisitHandle(uword addr) { |
| 268 FinalizablePersistentHandle* handle = | 265 FinalizablePersistentHandle* handle = |
| 269 reinterpret_cast<FinalizablePersistentHandle*>(addr); | 266 reinterpret_cast<FinalizablePersistentHandle*>(addr); |
| 270 RawObject** p = handle->raw_addr(); | 267 RawObject** p = handle->raw_addr(); |
| 271 if (scavenger_->IsUnreachable(p)) { | 268 if (scavenger_->IsUnreachable(p)) { |
| 272 ASSERT(!handle->IsPrologueWeakPersistent() || | |
| 273 !prologue_weak_were_strong_); | |
| 274 handle->UpdateUnreachable(thread()->isolate()); | 269 handle->UpdateUnreachable(thread()->isolate()); |
| 275 } else { | 270 } else { |
| 276 handle->UpdateRelocated(thread()->isolate()); | 271 handle->UpdateRelocated(thread()->isolate()); |
| 277 } | 272 } |
| 278 } | 273 } |
| 279 | 274 |
| 280 private: | 275 private: |
| 281 Scavenger* scavenger_; | 276 Scavenger* scavenger_; |
| 282 bool prologue_weak_were_strong_; | |
| 283 | 277 |
| 284 DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor); | 278 DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor); |
| 285 }; | 279 }; |
| 286 | 280 |
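A weak persistent handle is visited exactly once per scavenge: a handle whose referent was not evacuated out of from-space runs its finalizer, while a survivor's pointer is rewritten to the copied object. A minimal sketch of that decision, using illustrative stand-in types (Object, Handle, and the copied/new_location fields are assumptions, not the VM's API):

    #include <cstdio>
    #include <vector>

    struct Object {
      bool copied = false;             // Was the referent evacuated this scavenge?
      Object* new_location = nullptr;
    };

    struct Handle {
      Object* referent;
      void UpdateUnreachable() { std::printf("finalize\n"); referent = nullptr; }
      void UpdateRelocated() { referent = referent->new_location; }
    };

    static void VisitWeakHandles(std::vector<Handle>* handles) {
      for (Handle& h : *handles) {
        if (!h.referent->copied) {
          h.UpdateUnreachable();       // Dead referent: run finalizer, clear handle.
        } else {
          h.UpdateRelocated();         // Survivor: follow the forwarded location.
        }
      }
    }

In the visitor above the same split is made by Scavenger::IsUnreachable, which also rewrites the handle's slot for survivors before UpdateRelocated is called.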
| 287 | 281 |
| 288 // Visitor used to verify that all old->new references have been added to the | 282 // Visitor used to verify that all old->new references have been added to the |
| 289 // StoreBuffers. | 283 // StoreBuffers. |
| 290 class VerifyStoreBufferPointerVisitor : public ObjectPointerVisitor { | 284 class VerifyStoreBufferPointerVisitor : public ObjectPointerVisitor { |
| 291 public: | 285 public: |
| 292 VerifyStoreBufferPointerVisitor(Isolate* isolate, | 286 VerifyStoreBufferPointerVisitor(Isolate* isolate, |
| (...skipping 261 matching lines...) |
| 554 ObjectIdRing* ring = isolate->object_id_ring(); | 548 ObjectIdRing* ring = isolate->object_id_ring(); |
| 555 if (ring == NULL) { | 549 if (ring == NULL) { |
| 556 // --gc_at_alloc can get us here before the ring has been initialized. | 550 // --gc_at_alloc can get us here before the ring has been initialized. |
| 557 ASSERT(FLAG_gc_at_alloc); | 551 ASSERT(FLAG_gc_at_alloc); |
| 558 return; | 552 return; |
| 559 } | 553 } |
| 560 ring->VisitPointers(visitor); | 554 ring->VisitPointers(visitor); |
| 561 } | 555 } |
| 562 | 556 |
| 563 | 557 |
| 564 void Scavenger::IterateRoots(Isolate* isolate, | 558 void Scavenger::IterateRoots(Isolate* isolate, ScavengerVisitor* visitor) { |
| 565 ScavengerVisitor* visitor, | |
| 566 bool visit_prologue_weak_persistent_handles) { | |
| 567 int64_t start = OS::GetCurrentTimeMicros(); | 559 int64_t start = OS::GetCurrentTimeMicros(); |
| 568 isolate->VisitObjectPointers(visitor, | 560 isolate->VisitObjectPointers(visitor, |
| 569 visit_prologue_weak_persistent_handles, | |
| 570 StackFrameIterator::kDontValidateFrames); | 561 StackFrameIterator::kDontValidateFrames); |
| 571 int64_t middle = OS::GetCurrentTimeMicros(); | 562 int64_t middle = OS::GetCurrentTimeMicros(); |
| 572 IterateStoreBuffers(isolate, visitor); | 563 IterateStoreBuffers(isolate, visitor); |
| 573 IterateObjectIdTable(isolate, visitor); | 564 IterateObjectIdTable(isolate, visitor); |
| 574 int64_t end = OS::GetCurrentTimeMicros(); | 565 int64_t end = OS::GetCurrentTimeMicros(); |
| 575 heap_->RecordData(kToKBAfterStoreBuffer, RoundWordsToKB(UsedInWords())); | 566 heap_->RecordData(kToKBAfterStoreBuffer, RoundWordsToKB(UsedInWords())); |
| 576 heap_->RecordTime(kVisitIsolateRoots, middle - start); | 567 heap_->RecordTime(kVisitIsolateRoots, middle - start); |
| 577 heap_->RecordTime(kIterateStoreBuffers, end - middle); | 568 heap_->RecordTime(kIterateStoreBuffers, end - middle); |
| 578 } | 569 } |
| 579 | 570 |
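IterateRoots visits the isolate's roots and then the store buffers: a write barrier records old-space objects that may hold pointers into new-space, so those slots can serve as extra roots without scanning all of old space. A sketch of that remembered-set idea, with hypothetical names (Obj, WriteBarrier, store_buffer are illustrative, not the VM's):

    #include <unordered_set>
    #include <vector>

    struct Obj {
      bool in_new_space = false;
      std::vector<Obj*> fields;
    };

    // Remembered set: old-space objects that may reference new-space.
    static std::unordered_set<Obj*> store_buffer;

    // Run on every pointer store; remembers any old->new edge it creates.
    static void WriteBarrier(Obj* holder, Obj* value) {
      if (!holder->in_new_space && value != nullptr && value->in_new_space) {
        store_buffer.insert(holder);
      }
    }

    // During a scavenge, every field of a remembered object is treated as a root.
    template <typename Visitor>
    static void IterateStoreBuffer(Visitor visit) {
      for (Obj* holder : store_buffer) {
        for (Obj*& field : holder->fields) {
          visit(&field);               // visit may update the slot in place.
        }
      }
    }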
| (...skipping 13 matching lines...) |
| 593 uword header = *reinterpret_cast<uword*>(raw_addr); | 584 uword header = *reinterpret_cast<uword*>(raw_addr); |
| 594 if (IsForwarding(header)) { | 585 if (IsForwarding(header)) { |
| 595 uword new_addr = ForwardedAddr(header); | 586 uword new_addr = ForwardedAddr(header); |
| 596 *p = RawObject::FromAddr(new_addr); | 587 *p = RawObject::FromAddr(new_addr); |
| 597 return false; | 588 return false; |
| 598 } | 589 } |
| 599 return true; | 590 return true; |
| 600 } | 591 } |
| 601 | 592 |
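The snippet above recognizes an already-copied object by its header: when an object is evacuated, its from-space header is overwritten with a tagged forwarding word that encodes the new address. A sketch of one plausible encoding (the tag bits here are an assumption, not the VM's actual scheme):

    #include <cassert>
    #include <cstdint>

    using uword = std::uintptr_t;

    const uword kForwardingMask = 3;   // Assumed: low two bits of the header.
    const uword kForwarded = 3;

    static bool IsForwarding(uword header) {
      return (header & kForwardingMask) == kForwarded;
    }

    static uword ForwardedAddr(uword header) {
      assert(IsForwarding(header));
      return header & ~kForwardingMask;  // Strip the tag to recover the address.
    }

    static uword ForwardingHeader(uword new_addr) {
      assert((new_addr & kForwardingMask) == 0);  // Addresses are aligned.
      return new_addr | kForwarded;
    }

    int main() {
      uword new_addr = 0x1000;             // Hypothetical to-space address.
      uword header = ForwardingHeader(new_addr);
      assert(IsForwarding(header));
      assert(ForwardedAddr(header) == new_addr);
      return 0;
    }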
| 602 | 593 |
| 603 void Scavenger::IterateWeakReferences(Isolate* isolate, | 594 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { |
| 604 ScavengerVisitor* visitor) { | 595 isolate->VisitWeakPersistentHandles(visitor); |
| 605 ApiState* state = isolate->api_state(); | |
| 606 ASSERT(state != NULL); | |
| 607 while (true) { | |
| 608 WeakReferenceSet* queue = state->delayed_weak_reference_sets(); | |
| 609 if (queue == NULL) { | |
| 610 // The delay queue is empty, so no clean-up is required. | |
| 611 return; | |
| 612 } | |
| 613 state->set_delayed_weak_reference_sets(NULL); | |
| 614 while (queue != NULL) { | |
| 615 WeakReferenceSet* reference_set = WeakReferenceSet::Pop(&queue); | |
| 616 ASSERT(reference_set != NULL); | |
| 617 intptr_t num_keys = reference_set->num_keys(); | |
| 618 intptr_t num_values = reference_set->num_values(); | |
| 619 if ((num_keys == 1) && (num_values == 1) && | |
| 620 reference_set->SingletonKeyEqualsValue()) { | |
| 621 // We do not have to process sets that have just one key/value pair | |
| 622 // where the key and value are identical. | |
| 623 continue; | |
| 624 } | |
| 625 bool is_unreachable = true; | |
| 626 // Test each key object for reachability. If a key object is | |
| 627 // reachable, all value objects should be scavenged. | |
| 628 for (intptr_t k = 0; k < num_keys; ++k) { | |
| 629 if (!IsUnreachable(reference_set->get_key(k))) { | |
| 630 for (intptr_t v = 0; v < num_values; ++v) { | |
| 631 RawObject** raw_obj_addr = reference_set->get_value(v); | |
| 632 RawObject* raw_obj = *raw_obj_addr; | |
| 633 // Only visit heap objects that are in from space, i.e., new objects | |
| 634 // not in to space. This avoids visiting a value multiple times | |
| 635 // during a scavenge. | |
| 636 if (raw_obj->IsHeapObject() && | |
| 637 raw_obj->IsNewObject() && | |
| 638 !to_->Contains(RawObject::ToAddr(raw_obj))) { | |
| 639 visitor->VisitPointer(raw_obj_addr); | |
| 640 } | |
| 641 } | |
| 642 is_unreachable = false; | |
| 643 // Since we have found a key object that is reachable and all | |
| 644 // value objects have been marked, we can break out of iterating | |
| 645 // this set and move on to the next set. | |
| 646 break; | |
| 647 } | |
| 648 } | |
| 649 // If all key objects are unreachable, put the reference on a | |
| 650 // delay queue. This reference will be revisited if another | |
| 651 // reference is scavenged. | |
| 652 if (is_unreachable) { | |
| 653 state->DelayWeakReferenceSet(reference_set); | |
| 654 } | |
| 655 } | |
| 656 if ((resolved_top_ < top_) || PromotedStackHasMore()) { | |
| 657 ProcessToSpace(visitor); | |
| 658 } else { | |
| 659 // Break out of the loop if there has been no forward progress. | |
| 660 // All key objects in the weak reference sets are unreachable | |
| 661 // so we reset the weak reference sets queue. | |
| 662 state->set_delayed_weak_reference_sets(NULL); | |
| 663 break; | |
| 664 } | |
| 665 } | |
| 666 ASSERT(state->delayed_weak_reference_sets() == NULL); | |
| 667 // All weak reference sets are zone allocated, and unmarked references that | |
| 668 // were on the delay queue will be freed when the zone is released in the | |
| 669 // epilogue callback. | |
| 670 } | 596 } |
| 671 | 597 |
| 672 | 598 |
| 673 void Scavenger::IterateWeakRoots(Isolate* isolate, | |
| 674 HandleVisitor* visitor, | |
| 675 bool visit_prologue_weak_persistent_handles) { | |
| 676 isolate->VisitWeakPersistentHandles(visitor, | |
| 677 visit_prologue_weak_persistent_handles); | |
| 678 } | |
| 679 | |
| 680 | |
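The removed IterateWeakReferences loop implements ephemeron-style sets: a set's values are kept alive only if at least one key is already reachable, and sets whose keys are all dead are parked on a delay queue and revisited whenever newly scavenged objects might have made a key reachable, until a pass makes no forward progress. A standalone sketch of that fixpoint, with illustrative types (WeakSet and Object.reachable are stand-ins, not the VM's):

    #include <vector>

    struct Object {
      bool reachable = false;
    };

    struct WeakSet {
      std::vector<Object*> keys;
      std::vector<Object*> values;
    };

    // One pass: keeps the values of any set with a reachable key, re-queues the
    // rest. Returns true if the pass made forward progress.
    static bool ProcessWeakSets(std::vector<WeakSet*>* delayed) {
      bool progress = false;
      std::vector<WeakSet*> still_delayed;
      for (WeakSet* set : *delayed) {
        bool key_reachable = false;
        for (Object* key : set->keys) {
          if (key->reachable) {
            key_reachable = true;
            break;
          }
        }
        if (key_reachable) {
          for (Object* value : set->values) {
            value->reachable = true;     // Stands in for scavenging the value.
          }
          progress = true;
        } else {
          still_delayed.push_back(set);  // Revisit if a later pass marks a key.
        }
      }
      delayed->swap(still_delayed);
      return progress;
    }

A caller would alternate ProcessWeakSets with a transitive-marking step until it reports no progress, mirroring the ProcessToSpace calls in the removed loop.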
| 681 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { | 599 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { |
| 682 GrowableArray<RawObject*>* delayed_weak_stack = visitor->DelayedWeakStack(); | 600 GrowableArray<RawObject*>* delayed_weak_stack = visitor->DelayedWeakStack(); |
| 683 | 601 |
| 684 // Iterate until all work has been drained. | 602 // Iterate until all work has been drained. |
| 685 while ((resolved_top_ < top_) || | 603 while ((resolved_top_ < top_) || |
| 686 PromotedStackHasMore() || | 604 PromotedStackHasMore() || |
| 687 !delayed_weak_stack->is_empty()) { | 605 !delayed_weak_stack->is_empty()) { |
| 688 while (resolved_top_ < top_) { | 606 while (resolved_top_ < top_) { |
| 689 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); | 607 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); |
| 690 intptr_t class_id = raw_obj->GetClassId(); | 608 intptr_t class_id = raw_obj->GetClassId(); |
| (...skipping 175 matching lines...) |
| 866 intptr_t promo_candidate_words = | 784 intptr_t promo_candidate_words = |
| 867 (survivor_end_ - FirstObjectStart()) / kWordSize; | 785 (survivor_end_ - FirstObjectStart()) / kWordSize; |
| 868 SemiSpace* from = Prologue(isolate, invoke_api_callbacks); | 786 SemiSpace* from = Prologue(isolate, invoke_api_callbacks); |
| 869 // The API prologue/epilogue may create/destroy zones, so we must not | 787 // The API prologue/epilogue may create/destroy zones, so we must not |
| 870 // depend on zone allocations surviving beyond the epilogue callback. | 788 // depend on zone allocations surviving beyond the epilogue callback. |
| 871 { | 789 { |
| 872 StackZone zone(Thread::Current()); | 790 StackZone zone(Thread::Current()); |
| 873 // Set up the visitor and run the scavenge. | 791 // Set up the visitor and run the scavenge. |
| 874 ScavengerVisitor visitor(isolate, this, from); | 792 ScavengerVisitor visitor(isolate, this, from); |
| 875 page_space->AcquireDataLock(); | 793 page_space->AcquireDataLock(); |
| 876 const bool prologue_weak_are_strong = !invoke_api_callbacks; | 794 IterateRoots(isolate, &visitor); |
| 877 IterateRoots(isolate, &visitor, prologue_weak_are_strong); | |
| 878 int64_t start = OS::GetCurrentTimeMicros(); | 795 int64_t start = OS::GetCurrentTimeMicros(); |
| 879 ProcessToSpace(&visitor); | 796 ProcessToSpace(&visitor); |
| 880 int64_t middle = OS::GetCurrentTimeMicros(); | 797 int64_t middle = OS::GetCurrentTimeMicros(); |
| 881 IterateWeakReferences(isolate, &visitor); | 798 ScavengerWeakVisitor weak_visitor(this); |
| 882 ScavengerWeakVisitor weak_visitor(this, prologue_weak_are_strong); | 799 IterateWeakRoots(isolate, &weak_visitor); |
| 883 // Include the prologue weak handles, since we must process any promotion. | |
| 884 const bool visit_prologue_weak_handles = true; | |
| 885 IterateWeakRoots(isolate, &weak_visitor, visit_prologue_weak_handles); | |
| 886 visitor.Finalize(); | 800 visitor.Finalize(); |
| 887 ProcessWeakTables(); | 801 ProcessWeakTables(); |
| 888 page_space->ReleaseDataLock(); | 802 page_space->ReleaseDataLock(); |
| 889 | 803 |
| 890 // Scavenge finished. Run accounting. | 804 // Scavenge finished. Run accounting. |
| 891 int64_t end = OS::GetCurrentTimeMicros(); | 805 int64_t end = OS::GetCurrentTimeMicros(); |
| 892 heap_->RecordTime(kProcessToSpace, middle - start); | 806 heap_->RecordTime(kProcessToSpace, middle - start); |
| 893 heap_->RecordTime(kIterateWeaks, end - middle); | 807 heap_->RecordTime(kIterateWeaks, end - middle); |
| 894 stats_history_.Add( | 808 stats_history_.Add( |
| 895 ScavengeStats(start, end, | 809 ScavengeStats(start, end, |
| (...skipping 56 matching lines...) |
| 952 } | 866 } |
| 953 | 867 |
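The comment in Scavenge() about the API prologue/epilogue explains why the visitors live inside a StackZone block: zone allocations are freed in bulk when the scope closes, so nothing allocated there may escape to the epilogue callback. A sketch of that RAII scoping rule, with an illustrative Zone (not the VM's):

    #include <cstddef>
    #include <new>
    #include <vector>

    // Illustrative scoped arena: everything it hands out is freed in bulk
    // when the zone goes out of scope.
    class Zone {
     public:
      ~Zone() {
        for (void* p : allocations_) ::operator delete(p);
      }
      void* Alloc(std::size_t size) {
        void* p = ::operator new(size);
        allocations_.push_back(p);
        return p;
      }

     private:
      std::vector<void*> allocations_;
    };

    void ScavengeLike() {
      {
        Zone zone;  // Mirrors the StackZone block in Scavenge().
        int* scratch = static_cast<int*>(zone.Alloc(sizeof(int)));
        *scratch = 42;  // Only valid inside this scope.
      }  // All zone allocations are freed here.
      // The API epilogue callback runs after this point; no zone pointer may
      // escape to it.
    }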
| 954 | 868 |
| 955 void Scavenger::FreeExternal(intptr_t size) { | 869 void Scavenger::FreeExternal(intptr_t size) { |
| 956 ASSERT(size >= 0); | 870 ASSERT(size >= 0); |
| 957 external_size_ -= size; | 871 external_size_ -= size; |
| 958 ASSERT(external_size_ >= 0); | 872 ASSERT(external_size_ >= 0); |
| 959 } | 873 } |
| 960 | 874 |
| 961 } // namespace dart | 875 } // namespace dart |