Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/gc_marker.h" | 5 #include "vm/gc_marker.h" |
| 6 | 6 |
| 7 #include "vm/allocation.h" | 7 #include "vm/allocation.h" |
| 8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
| 9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
| 10 #include "vm/log.h" | 10 #include "vm/log.h" |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 170 uintptr_t marked_bytes() const { return marked_bytes_; } | 170 uintptr_t marked_bytes() const { return marked_bytes_; } |
| 171 | 171 |
| 172 intptr_t live_count(intptr_t class_id) { | 172 intptr_t live_count(intptr_t class_id) { |
| 173 return class_stats_count_[class_id]; | 173 return class_stats_count_[class_id]; |
| 174 } | 174 } |
| 175 | 175 |
| 176 intptr_t live_size(intptr_t class_id) { | 176 intptr_t live_size(intptr_t class_id) { |
| 177 return class_stats_size_[class_id]; | 177 return class_stats_size_[class_id]; |
| 178 } | 178 } |
| 179 | 179 |
| 180 // Returns true if some non-zero amount of work was performed. | 180 bool ProcessPendingWeakProperties() { |
| 181 bool DrainMarkingStack() { | 181 bool marked = false; |
| 182 RawWeakProperty* cur_weak = delayed_weak_properties_; | |
| 183 delayed_weak_properties_ = NULL; | |
| 184 while (cur_weak != NULL) { | |
| 185 uword next_weak = cur_weak->ptr()->next_; | |
| 186 RawObject* raw_key = cur_weak->ptr()->key_; | |
| 187 // Reset the next pointer in the weak property. | |
| 188 cur_weak->ptr()->next_ = 0; | |
| 189 if (raw_key->IsMarked()) { | |
| 190 RawObject* raw_val = cur_weak->ptr()->value_; | |
| 191 marked = marked || (raw_val->IsHeapObject() && !raw_val->IsMarked()); | |
| 192 | |
| 193 // The key is marked so we make sure to properly visit all pointers | |
| 194 // originating from this weak property. | |
| 195 VisitingOldObject(cur_weak); | |
| 196 cur_weak->VisitPointers(this); | |
| 197 } else { | |
| 198 // Requeue this weak property to be handled later. | |
| 199 EnqueueWeakProperty(cur_weak); | |
| 200 } | |
| 201 // Advance to next weak property in the queue. | |
| 202 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | |
| 203 } | |
| 204 | |
| 205 return marked; | |
| 206 } | |
| 207 | |
| 208 void DrainMarkingStack() { | |
| 182 RawObject* raw_obj = work_list_.Pop(); | 209 RawObject* raw_obj = work_list_.Pop(); |
| 210 if ((raw_obj == NULL) && ProcessPendingWeakProperties()) { | |
| 211 raw_obj = work_list_.Pop(); | |
| 212 } | |
| 213 | |
| 183 if (raw_obj == NULL) { | 214 if (raw_obj == NULL) { |
| 184 ASSERT(visiting_old_object_ == NULL); | 215 ASSERT(visiting_old_object_ == NULL); |
| 185 return false; | 216 return; |
| 186 } | 217 } |
| 187 do { | 218 do { |
| 188 do { | 219 do { |
| 189 // First drain the marking stacks. | 220 // First drain the marking stacks. |
| 190 VisitingOldObject(raw_obj); | 221 VisitingOldObject(raw_obj); |
| 191 const intptr_t class_id = raw_obj->GetClassId(); | 222 const intptr_t class_id = raw_obj->GetClassId(); |
| 192 if (class_id != kWeakPropertyCid) { | 223 if (class_id != kWeakPropertyCid) { |
| 193 marked_bytes_ += raw_obj->VisitPointers(this); | 224 marked_bytes_ += raw_obj->VisitPointers(this); |
| 194 } else { | 225 } else { |
| 195 RawWeakProperty* raw_weak = | 226 RawWeakProperty* raw_weak = |
| 196 reinterpret_cast<RawWeakProperty*>(raw_obj); | 227 reinterpret_cast<RawWeakProperty*>(raw_obj); |
| 197 marked_bytes_ += ProcessWeakProperty(raw_weak); | 228 marked_bytes_ += ProcessWeakProperty(raw_weak); |
| 198 } | 229 } |
| 199 raw_obj = work_list_.Pop(); | 230 raw_obj = work_list_.Pop(); |
| 200 } while (raw_obj != NULL); | 231 } while (raw_obj != NULL); |
| 201 | 232 |
| 202 // Marking stack is empty. | 233 // Marking stack is empty. |
| 203 // Process all the pending weak properties in this visitor. | 234 ProcessPendingWeakProperties(); |
| 204 RawWeakProperty* cur_weak = delayed_weak_properties_; | |
| 205 delayed_weak_properties_ = NULL; | |
| 206 while (cur_weak != NULL) { | |
| 207 uword next_weak = cur_weak->ptr()->next_; | |
| 208 RawObject* raw_key = cur_weak->ptr()->key_; | |
| 209 // Reset the next pointer in the weak property. | |
| 210 cur_weak->ptr()->next_ = 0; | |
| 211 if (raw_key->IsMarked()) { | |
| 212 // The key is marked so we make sure to properly visit all pointers | |
| 213 // originating from this weak property. | |
| 214 VisitingOldObject(cur_weak); | |
| 215 cur_weak->VisitPointers(this); | |
| 216 } else { | |
| 217 // Requeue this weak property to be handled later. | |
| 218 EnqueueWeakProperty(cur_weak); | |
| 219 } | |
| 220 // Advance to next weak property in the queue. | |
| 221 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | |
| 222 } | |
| 223 | 235 |
| 224 // Check whether any further work was pushed either by other markers or | 236 // Check whether any further work was pushed either by other markers or |
| 225 // by the handling of weak properties. | 237 // by the handling of weak properties. |
| 226 raw_obj = work_list_.Pop(); | 238 raw_obj = work_list_.Pop(); |
| 227 } while (raw_obj != NULL); | 239 } while (raw_obj != NULL); |
| 228 VisitingOldObject(NULL); | 240 VisitingOldObject(NULL); |
| 229 return true; | |
| 230 } | 241 } |
| 231 | 242 |
| 232 void VisitPointers(RawObject** first, RawObject** last) { | 243 void VisitPointers(RawObject** first, RawObject** last) { |
| 233 for (RawObject** current = first; current <= last; current++) { | 244 for (RawObject** current = first; current <= last; current++) { |
| 234 MarkObject(*current, current); | 245 MarkObject(*current, current); |
| 235 } | 246 } |
| 236 } | 247 } |
| 237 | 248 |
| 238 bool visit_function_code() const { | 249 bool visit_function_code() const { |
| 239 return skipped_code_functions_ == NULL; | 250 return skipped_code_functions_ == NULL; |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 275 if (skipped_code_functions_ != NULL) { | 286 if (skipped_code_functions_ != NULL) { |
| 276 skipped_code_functions_->DetachCode(); | 287 skipped_code_functions_->DetachCode(); |
| 277 } | 288 } |
| 278 // Clear pending weak properties. | 289 // Clear pending weak properties. |
| 279 RawWeakProperty* cur_weak = delayed_weak_properties_; | 290 RawWeakProperty* cur_weak = delayed_weak_properties_; |
| 280 delayed_weak_properties_ = NULL; | 291 delayed_weak_properties_ = NULL; |
| 281 intptr_t weak_properties_cleared = 0; | 292 intptr_t weak_properties_cleared = 0; |
| 282 while (cur_weak != NULL) { | 293 while (cur_weak != NULL) { |
| 283 uword next_weak = cur_weak->ptr()->next_; | 294 uword next_weak = cur_weak->ptr()->next_; |
| 284 cur_weak->ptr()->next_ = 0; | 295 cur_weak->ptr()->next_ = 0; |
| 296 RELEASE_ASSERT(!cur_weak->ptr()->key_->IsMarked()); | |
| 285 WeakProperty::Clear(cur_weak); | 297 WeakProperty::Clear(cur_weak); |
| 286 weak_properties_cleared++; | 298 weak_properties_cleared++; |
| 287 // Advance to next weak property in the queue. | 299 // Advance to next weak property in the queue. |
| 288 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 300 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
| 289 } | 301 } |
| 290 } | 302 } |
| 291 | 303 |
| 292 void VisitingOldObject(RawObject* obj) { | 304 void VisitingOldObject(RawObject* obj) { |
| 293 ASSERT((obj == NULL) || obj->IsOldObject()); | 305 ASSERT((obj == NULL) || obj->IsOldObject()); |
| 294 visiting_old_object_ = obj; | 306 visiting_old_object_ = obj; |
| (...skipping 267 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 562 Thread* thread = Thread::Current(); | 574 Thread* thread = Thread::Current(); |
| 563 TIMELINE_FUNCTION_GC_DURATION(thread, "MarkTask"); | 575 TIMELINE_FUNCTION_GC_DURATION(thread, "MarkTask"); |
| 564 StackZone stack_zone(thread); | 576 StackZone stack_zone(thread); |
| 565 Zone* zone = stack_zone.GetZone(); | 577 Zone* zone = stack_zone.GetZone(); |
| 566 SkippedCodeFunctions* skipped_code_functions = | 578 SkippedCodeFunctions* skipped_code_functions = |
| 567 collect_code_ ? new(zone) SkippedCodeFunctions() : NULL; | 579 collect_code_ ? new(zone) SkippedCodeFunctions() : NULL; |
| 568 SyncMarkingVisitor visitor(isolate_, heap_, page_space_, marking_stack_, | 580 SyncMarkingVisitor visitor(isolate_, heap_, page_space_, marking_stack_, |
| 569 skipped_code_functions); | 581 skipped_code_functions); |
| 570 // Phase 1: Iterate over roots and drain marking stack in tasks. | 582 // Phase 1: Iterate over roots and drain marking stack in tasks. |
| 571 marker_->IterateRoots(isolate_, &visitor, task_index_, num_tasks_); | 583 marker_->IterateRoots(isolate_, &visitor, task_index_, num_tasks_); |
| 584 | |
| 585 bool more_to_mark = false; | |
| 572 do { | 586 do { |
| 573 visitor.DrainMarkingStack(); | 587 do { |
| 588 visitor.DrainMarkingStack(); | |
| 574 | 589 |
| 575 // I can't find more work right now. If no other task is busy, | 590 // I can't find more work right now. If no other task is busy, |
| 576 // then there will never be more work (NB: 1 is *before* decrement). | 591 // then there will never be more work (NB: 1 is *before* decrement). |
| 577 if (AtomicOperations::FetchAndDecrement(num_busy_) == 1) break; | 592 if (AtomicOperations::FetchAndDecrement(num_busy_) == 1) break; |
| 578 | 593 |
| 579 // Wait for some work to appear. | 594 // Wait for some work to appear. |
| 580 // TODO(iposva): Replace busy-waiting with a solution using Monitor, | 595 // TODO(iposva): Replace busy-waiting with a solution using Monitor, |
| 581 // and redraw the boundaries between stack/visitor/task as needed. | 596 // and redraw the boundaries between stack/visitor/task as needed. |
| 582 while (marking_stack_->IsEmpty() && | 597 while (marking_stack_->IsEmpty() && |
| 583 AtomicOperations::LoadRelaxed(num_busy_) > 0) { | 598 AtomicOperations::LoadRelaxed(num_busy_) > 0) { |
| 599 } | |
| 600 | |
| 601 // If no tasks are busy, there will never be more work. | |
| 602 if (AtomicOperations::LoadRelaxed(num_busy_) == 0) break; | |
| 603 | |
| 604 // I saw some work; get busy and compete for it. | |
| 605 AtomicOperations::FetchAndIncrement(num_busy_); | |
| 606 } while (true); | |
| 607 // Wait for all markers to stop. | |
| 608 barrier_->Sync(); | |
| 609 ASSERT(AtomicOperations::LoadRelaxed(num_busy_) == 0); | |
| 610 | |
| 611 // Check if we have any pending properties with marked keys. | |
| 612 // Those might have been marked by another marker. | |
| 613 more_to_mark = visitor.ProcessPendingWeakProperties(); | |
| 614 if (more_to_mark) { | |
| 615 // We have more work to do. Notify others. | |
| 616 AtomicOperations::FetchAndIncrement(num_busy_); | |
| 584 } | 617 } |
| 585 | 618 |
| 586 // If no tasks are busy, there will never be more work. | 619 // Wait for all other markers to finish processing their pending |
| 587 if (AtomicOperations::LoadRelaxed(num_busy_) == 0) break; | 620 // weak properties and decide if they need to continue marking. |
| 588 | 621 // Caveat: we need two barriers here to make this decision in lock step |
| 589 // I saw some work; get busy and compete for it. | 622 // between all markers and the main thread. |
| 590 AtomicOperations::FetchAndIncrement(num_busy_); | 623 barrier_->Sync(); |
| 591 } while (true); | 624 if (!more_to_mark && (AtomicOperations::LoadRelaxed(num_busy_) > 0)) { |
| 592 ASSERT(AtomicOperations::LoadRelaxed(num_busy_) == 0); | 625 // All markers continue to mark as long as any single marker has |
| 593 barrier_->Sync(); | 626 // some work to do. |
| 627 AtomicOperations::FetchAndIncrement(num_busy_); | |
| 628 more_to_mark = true; | |
| 629 } | |
| 630 barrier_->Sync(); | |
|
siva
2016/06/08 19:45:20
Why is this barrier_->Sync() necessary? At this po
Vyacheslav Egorov (Google)
2016/06/09 08:17:05
Because strictly speaking one thread that has work
siva
2016/06/09 16:39:53
True if we let the threads run we get into a scena
| |
| 631 } while (more_to_mark); | |
| 594 | 632 |
| 595 // Phase 2: Weak processing and follow-up marking on main thread. | 633 // Phase 2: Weak processing and follow-up marking on main thread. |
| 596 barrier_->Sync(); | 634 barrier_->Sync(); |
| 597 | 635 |
| 598 // Phase 3: Finalize results from all markers (detach code, etc.). | 636 // Phase 3: Finalize results from all markers (detach code, etc.). |
| 599 if (FLAG_log_marker_tasks) { | 637 if (FLAG_log_marker_tasks) { |
| 600 THR_Print("Task %" Pd " marked %" Pd " bytes.\n", | 638 THR_Print("Task %" Pd " marked %" Pd " bytes.\n", |
| 601 task_index_, visitor.marked_bytes()); | 639 task_index_, visitor.marked_bytes()); |
| 602 } | 640 } |
| 603 marker_->FinalizeResultsFrom(&visitor); | 641 marker_->FinalizeResultsFrom(&visitor); |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 681 uintptr_t num_busy = num_tasks; | 719 uintptr_t num_busy = num_tasks; |
| 682 // Phase 1: Iterate over roots and drain marking stack in tasks. | 720 // Phase 1: Iterate over roots and drain marking stack in tasks. |
| 683 for (intptr_t i = 0; i < num_tasks; ++i) { | 721 for (intptr_t i = 0; i < num_tasks; ++i) { |
| 684 MarkTask* mark_task = | 722 MarkTask* mark_task = |
| 685 new MarkTask(this, isolate, heap_, page_space, &marking_stack, | 723 new MarkTask(this, isolate, heap_, page_space, &marking_stack, |
| 686 &barrier, collect_code, | 724 &barrier, collect_code, |
| 687 i, num_tasks, &num_busy); | 725 i, num_tasks, &num_busy); |
| 688 ThreadPool* pool = Dart::thread_pool(); | 726 ThreadPool* pool = Dart::thread_pool(); |
| 689 pool->Run(mark_task); | 727 pool->Run(mark_task); |
| 690 } | 728 } |
| 691 barrier.Sync(); | 729 bool more_to_mark = false; |
| 730 do { | |
| 731 // Wait for all markers to stop. | |
| 732 barrier.Sync(); | |
| 733 | |
| 734 // Wait for all markers to go through weak properties and verify | |
| 735 // that there are no more objects to mark. | |
| 736 // Note: we need to have two barriers here because we want all markers | |
| 737 // and main thread to make decisions in lock step. | |
| 738 barrier.Sync(); | |
|
siva
2016/06/08 19:45:20
back to back barrier.Sync() calls with no code between them?
Vyacheslav Egorov (Google)
2016/06/09 08:17:06
These barriers correspond to barriers in the marke
siva
2016/06/09 16:39:53
Acknowledged.
| |
| 739 more_to_mark = AtomicOperations::LoadRelaxed(&num_busy) > 0; | |
| 740 barrier.Sync(); | |
| 741 } while (more_to_mark); | |
|
siva
2016/06/08 19:45:20
Is this loop necessary could we just assert after
Vyacheslav Egorov (Google)
2016/06/09 08:17:05
Yes, the loop is necessary, because there is a loo
siva
2016/06/09 16:39:53
Acknowledged.
| |
| 692 | 742 |
| 693 // Phase 2: Weak processing on main thread. | 743 // Phase 2: Weak processing on main thread. |
| 694 { | 744 { |
| 695 TIMELINE_FUNCTION_GC_DURATION(thread, "WeakHandleProcessing"); | 745 TIMELINE_FUNCTION_GC_DURATION(thread, "WeakHandleProcessing"); |
| 696 MarkingWeakVisitor mark_weak; | 746 MarkingWeakVisitor mark_weak; |
| 697 IterateWeakRoots(isolate, &mark_weak); | 747 IterateWeakRoots(isolate, &mark_weak); |
| 698 } | 748 } |
| 699 barrier.Sync(); | 749 barrier.Sync(); |
| 700 | 750 |
| 701 // Phase 3: Finalize results from all markers (detach code, etc.). | 751 // Phase 3: Finalize results from all markers (detach code, etc.). |
| 702 barrier.Exit(); | 752 barrier.Exit(); |
| 703 } | 753 } |
| 704 ProcessWeakTables(page_space); | 754 ProcessWeakTables(page_space); |
| 705 ProcessObjectIdTable(isolate); | 755 ProcessObjectIdTable(isolate); |
| 706 } | 756 } |
| 707 Epilogue(isolate, invoke_api_callbacks); | 757 Epilogue(isolate, invoke_api_callbacks); |
| 708 } | 758 } |
| 709 | 759 |
| 710 } // namespace dart | 760 } // namespace dart |
| OLD | NEW |