Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(9)

Side by Side Diff: runtime/vm/gc_marker.cc

Issue 1176183002: Refactor delay set to allow more than one marking visitor (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Drain marking stack on a separate thread, just to show we can. Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « runtime/vm/gc_marker.h ('k') | runtime/vm/raw_object.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/gc_marker.h" 5 #include "vm/gc_marker.h"
6 6
7 #include <map> 7 #include <map>
8 #include <utility> 8 #include <utility>
9 #include <vector> 9 #include <vector>
10 10
11 #include "vm/allocation.h" 11 #include "vm/allocation.h"
12 #include "vm/dart_api_state.h" 12 #include "vm/dart_api_state.h"
13 #include "vm/isolate.h" 13 #include "vm/isolate.h"
14 #include "vm/log.h" 14 #include "vm/log.h"
15 #include "vm/pages.h" 15 #include "vm/pages.h"
16 #include "vm/raw_object.h" 16 #include "vm/raw_object.h"
17 #include "vm/stack_frame.h" 17 #include "vm/stack_frame.h"
18 #include "vm/thread_pool.h"
18 #include "vm/visitor.h" 19 #include "vm/visitor.h"
19 #include "vm/object_id_ring.h" 20 #include "vm/object_id_ring.h"
20 21
21 namespace dart { 22 namespace dart {
22 23
23 // A simple chunked marking stack. 24 // A simple chunked marking stack.
24 class MarkingStack : public ValueObject { 25 class MarkingStack : public ValueObject {
25 public: 26 public:
26 MarkingStack() 27 MarkingStack()
27 : head_(new MarkingStackChunk()), 28 : head_(new MarkingStackChunk()),
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
113 114
114 MarkingStackChunk* head_; 115 MarkingStackChunk* head_;
115 MarkingStackChunk* empty_chunks_; 116 MarkingStackChunk* empty_chunks_;
116 RawObject** marking_stack_; 117 RawObject** marking_stack_;
117 uint32_t top_; 118 uint32_t top_;
118 119
119 DISALLOW_COPY_AND_ASSIGN(MarkingStack); 120 DISALLOW_COPY_AND_ASSIGN(MarkingStack);
120 }; 121 };
121 122
122 123
124 class DelaySet {
125 private:
126 typedef std::multimap<RawObject*, RawWeakProperty*> Map;
127 typedef std::pair<RawObject*, RawWeakProperty*> MapEntry;
128
129 public:
130 DelaySet() : mutex_(new Mutex()) {}
131 ~DelaySet() { delete mutex_; }
132
133 // Returns 'true' if this inserted a new key (not just updated the value).
134 bool Insert(RawWeakProperty* raw_weak) {
135 MutexLocker ml(mutex_);
136 RawObject* raw_key = raw_weak->ptr()->key_;
137 bool new_key = (delay_set_.find(raw_key) == delay_set_.end());
138 delay_set_.insert(std::make_pair(raw_key, raw_weak));
139 return new_key;
140 }
141
142 void ClearAll() {
143 MutexLocker ml(mutex_);
144 Map::iterator it = delay_set_.begin();
145 for (; it != delay_set_.end(); ++it) {
146 WeakProperty::Clear(it->second);
147 }
148 }
149
150 // Visit all values with a key equal to raw_obj.
151 void VisitValuesForKey(RawObject* raw_obj, ObjectPointerVisitor* visitor) {
152 // Extract the range into a temporary vector to iterate over it
153 // while delay_set_ may be modified.
154 std::vector<MapEntry> temp_copy;
155 {
156 MutexLocker ml(mutex_);
157 std::pair<Map::iterator, Map::iterator> ret =
158 delay_set_.equal_range(raw_obj);
159 temp_copy.insert(temp_copy.end(), ret.first, ret.second);
160 delay_set_.erase(ret.first, ret.second);
161 }
162 for (std::vector<MapEntry>::iterator it = temp_copy.begin();
163 it != temp_copy.end(); ++it) {
164 it->second->VisitPointers(visitor);
165 }
166 }
167
168 private:
169 Map delay_set_;
170 Mutex* mutex_;
171 };
172
173
123 class MarkingVisitor : public ObjectPointerVisitor { 174 class MarkingVisitor : public ObjectPointerVisitor {
124 public: 175 public:
125 MarkingVisitor(Isolate* isolate, 176 MarkingVisitor(Isolate* isolate,
126 Heap* heap, 177 Heap* heap,
127 PageSpace* page_space, 178 PageSpace* page_space,
128 MarkingStack* marking_stack, 179 MarkingStack* marking_stack,
180 DelaySet* delay_set,
129 bool visit_function_code) 181 bool visit_function_code)
130 : ObjectPointerVisitor(isolate), 182 : ObjectPointerVisitor(isolate),
131 thread_(Thread::Current()), 183 thread_(Thread::Current()),
132 heap_(heap), 184 heap_(heap),
133 vm_heap_(Dart::vm_isolate()->heap()), 185 vm_heap_(Dart::vm_isolate()->heap()),
134 class_table_(isolate->class_table()), 186 class_table_(isolate->class_table()),
135 page_space_(page_space), 187 page_space_(page_space),
136 marking_stack_(marking_stack), 188 marking_stack_(marking_stack),
189 delay_set_(delay_set),
137 visiting_old_object_(NULL), 190 visiting_old_object_(NULL),
138 visit_function_code_(visit_function_code) { 191 visit_function_code_(visit_function_code) {
139 ASSERT(heap_ != vm_heap_); 192 ASSERT(heap_ != vm_heap_);
140 ASSERT(thread_->isolate() == isolate); 193 ASSERT(thread_->isolate() == isolate);
141 } 194 }
142 195
196 ~MarkingVisitor() {
197 // 'Finalize' should be explicitly called before destruction.
198 ASSERT(marking_stack_ == NULL);
199 }
200
143 MarkingStack* marking_stack() const { return marking_stack_; } 201 MarkingStack* marking_stack() const { return marking_stack_; }
144 202
145 void VisitPointers(RawObject** first, RawObject** last) { 203 void VisitPointers(RawObject** first, RawObject** last) {
146 for (RawObject** current = first; current <= last; current++) { 204 for (RawObject** current = first; current <= last; current++) {
147 MarkObject(*current, current); 205 MarkObject(*current, current);
148 } 206 }
149 } 207 }
150 208
151 bool visit_function_code() const { return visit_function_code_; } 209 bool visit_function_code() const { return visit_function_code_; }
152 210
153 virtual GrowableArray<RawFunction*>* skipped_code_functions() { 211 virtual GrowableArray<RawFunction*>* skipped_code_functions() {
154 return &skipped_code_functions_; 212 return &skipped_code_functions_;
155 } 213 }
156 214
157 void DelayWeakProperty(RawWeakProperty* raw_weak) { 215 // Returns the mark bit. Sets the watch bit if unmarked. (The prior value of
158 RawObject* raw_key = raw_weak->ptr()->key_; 216 // the watched bit is returned in 'watched_before' for validation purposes.)
159 DelaySet::iterator it = delay_set_.find(raw_key); 217 // TODO(koda): When synchronizing header bits, this goes in a single CAS loop.
160 if (it != delay_set_.end()) { 218 static bool EnsureWatchedIfWhite(RawObject* obj, bool* watched_before) {
161 ASSERT(raw_key->IsWatched()); 219 if (obj->IsMarked()) {
220 return false;
221 }
222 if (!obj->IsWatched()) {
223 *watched_before = false;
224 obj->SetWatchedBitUnsynchronized();
162 } else { 225 } else {
163 ASSERT(!raw_key->IsWatched()); 226 *watched_before = true;
164 raw_key->SetWatchedBitUnsynchronized();
165 } 227 }
166 delay_set_.insert(std::make_pair(raw_key, raw_weak)); 228 return true;
167 } 229 }
168 230
231 void ProcessWeakProperty(RawWeakProperty* raw_weak) {
232 // The fate of the weak property is determined by its key.
233 RawObject* raw_key = raw_weak->ptr()->key_;
234 bool watched_before = false;
235 if (raw_key->IsHeapObject() &&
236 raw_key->IsOldObject() &&
237 EnsureWatchedIfWhite(raw_key, &watched_before)) {
238 // Key is white. Delay the weak property.
239 bool new_key = delay_set_->Insert(raw_weak);
240 ASSERT(new_key == !watched_before);
241 } else {
242 // Key is gray or black. Make the weak property black.
243 raw_weak->VisitPointers(this);
244 }
245 }
246
247 // Called when all marking is complete.
169 void Finalize() { 248 void Finalize() {
170 DelaySet::iterator it = delay_set_.begin();
171 for (; it != delay_set_.end(); ++it) {
172 WeakProperty::Clear(it->second);
173 }
174 if (!visit_function_code_) { 249 if (!visit_function_code_) {
175 DetachCode(); 250 DetachCode();
176 } 251 }
252 // Fail fast on attempts to mark after finalizing.
253 marking_stack_ = NULL;
177 } 254 }
178 255
179 void VisitingOldObject(RawObject* obj) { 256 void VisitingOldObject(RawObject* obj) {
180 ASSERT((obj == NULL) || obj->IsOldObject()); 257 ASSERT((obj == NULL) || obj->IsOldObject());
181 visiting_old_object_ = obj; 258 visiting_old_object_ = obj;
182 } 259 }
183 260
184 private: 261 private:
185 void MarkAndPush(RawObject* raw_obj) { 262 void MarkAndPush(RawObject* raw_obj) {
186 ASSERT(raw_obj->IsHeapObject()); 263 ASSERT(raw_obj->IsHeapObject());
187 ASSERT((FLAG_verify_before_gc || FLAG_verify_after_gc) ? 264
188 page_space_->Contains(RawObject::ToAddr(raw_obj)) : 265 page_space_->Contains(RawObject::ToAddr(raw_obj)) :
189 true); 266 true);
190 267
191 // Mark the object and push it on the marking stack. 268 // Mark the object and push it on the marking stack.
192 ASSERT(!raw_obj->IsMarked()); 269 ASSERT(!raw_obj->IsMarked());
193 const bool is_watched = raw_obj->IsWatched(); 270 const bool is_watched = raw_obj->IsWatched();
194 raw_obj->SetMarkBitUnsynchronized(); 271 raw_obj->SetMarkBitUnsynchronized();
195 raw_obj->ClearRememberedBitUnsynchronized(); 272 raw_obj->ClearRememberedBitUnsynchronized();
196 raw_obj->ClearWatchedBitUnsynchronized(); 273 raw_obj->ClearWatchedBitUnsynchronized();
197 if (is_watched) { 274 if (is_watched) {
198 std::pair<DelaySet::iterator, DelaySet::iterator> ret; 275 delay_set_->VisitValuesForKey(raw_obj, this);
199 // Visit all elements with a key equal to raw_obj.
200 ret = delay_set_.equal_range(raw_obj);
201 // Create a copy of the range in a temporary vector to iterate over it
202 // while delay_set_ may be modified.
203 std::vector<DelaySetEntry> temp_copy(ret.first, ret.second);
204 delay_set_.erase(ret.first, ret.second);
205 for (std::vector<DelaySetEntry>::iterator it = temp_copy.begin();
206 it != temp_copy.end(); ++it) {
207 it->second->VisitPointers(this);
208 }
209 } 276 }
210 marking_stack_->Push(raw_obj); 277 marking_stack_->Push(raw_obj);
211 } 278 }
212 279
213 void MarkObject(RawObject* raw_obj, RawObject** p) { 280 void MarkObject(RawObject* raw_obj, RawObject** p) {
214 // Fast exit if the raw object is a Smi. 281 // Fast exit if the raw object is a Smi.
215 if (!raw_obj->IsHeapObject()) { 282 if (!raw_obj->IsHeapObject()) {
216 return; 283 return;
217 } 284 }
218 285
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
278 if (FLAG_log_code_drop) { 345 if (FLAG_log_code_drop) {
279 unoptimized_code_count++; 346 unoptimized_code_count++;
280 } 347 }
281 } 348 }
282 } 349 }
283 if (FLAG_log_code_drop) { 350 if (FLAG_log_code_drop) {
284 ISL_Print(" total detached current: %" Pd "\n", current_code_count); 351 ISL_Print(" total detached current: %" Pd "\n", current_code_count);
285 ISL_Print(" total detached unoptimized: %" Pd "\n", 352 ISL_Print(" total detached unoptimized: %" Pd "\n",
286 unoptimized_code_count); 353 unoptimized_code_count);
287 } 354 }
355 // Clean up (even if the array is currently Zone-allocated).
356 skipped_code_functions_.Clear();
288 } 357 }
289 358
290 Thread* thread_; 359 Thread* thread_;
291 Heap* heap_; 360 Heap* heap_;
292 Heap* vm_heap_; 361 Heap* vm_heap_;
293 ClassTable* class_table_; 362 ClassTable* class_table_;
294 PageSpace* page_space_; 363 PageSpace* page_space_;
295 MarkingStack* marking_stack_; 364 MarkingStack* marking_stack_;
365 DelaySet* delay_set_;
296 RawObject* visiting_old_object_; 366 RawObject* visiting_old_object_;
297 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet;
298 typedef std::pair<RawObject*, RawWeakProperty*> DelaySetEntry;
299 DelaySet delay_set_;
300 const bool visit_function_code_; 367 const bool visit_function_code_;
301 GrowableArray<RawFunction*> skipped_code_functions_; 368 GrowableArray<RawFunction*> skipped_code_functions_;
302 369
303 DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitor); 370 DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitor);
304 }; 371 };
305 372
306 373
307 static bool IsUnreachable(const RawObject* raw_obj) { 374 static bool IsUnreachable(const RawObject* raw_obj) {
308 if (!raw_obj->IsHeapObject()) { 375 if (!raw_obj->IsHeapObject()) {
309 return false; 376 return false;
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
442 visitor->VisitingOldObject(raw_obj); 509 visitor->VisitingOldObject(raw_obj);
443 const intptr_t class_id = raw_obj->GetClassId(); 510 const intptr_t class_id = raw_obj->GetClassId();
444 // Currently, classes are considered roots (see issue 18284), so at this 511 // Currently, classes are considered roots (see issue 18284), so at this
445 // point, they should all be marked. 512 // point, they should all be marked.
446 ASSERT(isolate->class_table()->At(class_id)->IsMarked()); 513 ASSERT(isolate->class_table()->At(class_id)->IsMarked());
447 if (class_id != kWeakPropertyCid) { 514 if (class_id != kWeakPropertyCid) {
448 marked_bytes_ += raw_obj->VisitPointers(visitor); 515 marked_bytes_ += raw_obj->VisitPointers(visitor);
449 } else { 516 } else {
450 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); 517 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj);
451 marked_bytes_ += raw_weak->Size(); 518 marked_bytes_ += raw_weak->Size();
452 ProcessWeakProperty(raw_weak, visitor); 519 visitor->ProcessWeakProperty(raw_weak);
453 } 520 }
454 } 521 }
455 visitor->VisitingOldObject(NULL); 522 visitor->VisitingOldObject(NULL);
456 } 523 }
457 524
458 525
459 void GCMarker::ProcessWeakProperty(RawWeakProperty* raw_weak,
460 MarkingVisitor* visitor) {
461 // The fate of the weak property is determined by its key.
462 RawObject* raw_key = raw_weak->ptr()->key_;
463 if (raw_key->IsHeapObject() &&
464 raw_key->IsOldObject() &&
465 !raw_key->IsMarked()) {
466 // Key is white. Delay the weak property.
467 visitor->DelayWeakProperty(raw_weak);
468 } else {
469 // Key is gray or black. Make the weak property black.
470 raw_weak->VisitPointers(visitor);
471 }
472 }
473
474
475 void GCMarker::ProcessWeakTables(PageSpace* page_space) { 526 void GCMarker::ProcessWeakTables(PageSpace* page_space) {
476 for (int sel = 0; 527 for (int sel = 0;
477 sel < Heap::kNumWeakSelectors; 528 sel < Heap::kNumWeakSelectors;
478 sel++) { 529 sel++) {
479 WeakTable* table = heap_->GetWeakTable( 530 WeakTable* table = heap_->GetWeakTable(
480 Heap::kOld, static_cast<Heap::WeakSelector>(sel)); 531 Heap::kOld, static_cast<Heap::WeakSelector>(sel));
481 intptr_t size = table->size(); 532 intptr_t size = table->size();
482 for (intptr_t i = 0; i < size; i++) { 533 for (intptr_t i = 0; i < size; i++) {
483 if (table->IsValidEntryAt(i)) { 534 if (table->IsValidEntryAt(i)) {
484 RawObject* raw_obj = table->ObjectAt(i); 535 RawObject* raw_obj = table->ObjectAt(i);
(...skipping 27 matching lines...) Expand all
512 563
513 564
514 void GCMarker::ProcessObjectIdTable(Isolate* isolate) { 565 void GCMarker::ProcessObjectIdTable(Isolate* isolate) {
515 ObjectIdRingClearPointerVisitor visitor(isolate); 566 ObjectIdRingClearPointerVisitor visitor(isolate);
516 ObjectIdRing* ring = isolate->object_id_ring(); 567 ObjectIdRing* ring = isolate->object_id_ring();
517 ASSERT(ring != NULL); 568 ASSERT(ring != NULL);
518 ring->VisitPointers(&visitor); 569 ring->VisitPointers(&visitor);
519 } 570 }
520 571
521 572
573 class DrainTask : public ThreadPool::Task {
574 public:
575 DrainTask(GCMarker* marker,
576 Isolate* isolate,
577 PageSpace* page_space,
578 MarkingVisitor* visitor)
579 : marker_(marker),
580 isolate_(isolate),
581 page_space_(page_space),
582 visitor_(visitor) {
583 MonitorLocker ml(page_space->tasks_lock());
584 page_space->set_tasks(page_space->tasks() + 1);
585 ml.NotifyAll();
Ivan Posva 2015/06/16 17:21:00 Why NotifyAll()? Do you really intend to wake up all
koda 2015/06/16 19:41:44 Removed.
586 }
587
588 virtual ~DrainTask() {}
589
590 virtual void Run() {
591 Thread::EnterIsolateAsHelper(isolate_);
592 marker_->DrainMarkingStack(isolate_, visitor_);
593 // This task is done. Notify the original thread.
594 {
595 MonitorLocker ml(page_space_->tasks_lock());
596 page_space_->set_tasks(page_space_->tasks() - 1);
597 ml.NotifyAll();
Ivan Posva 2015/06/16 17:21:00 ditto
koda 2015/06/16 19:41:43 Changed to Notify.
598 }
599 Thread::ExitIsolateAsHelper();
600 }
601
602 private:
603 GCMarker* marker_;
604 Isolate* isolate_;
605 PageSpace* page_space_;
606 MarkingVisitor* visitor_;
607 };
608
609
522 void GCMarker::MarkObjects(Isolate* isolate, 610 void GCMarker::MarkObjects(Isolate* isolate,
523 PageSpace* page_space, 611 PageSpace* page_space,
524 bool invoke_api_callbacks, 612 bool invoke_api_callbacks,
525 bool collect_code) { 613 bool collect_code) {
526 const bool visit_function_code = !collect_code; 614 const bool visit_function_code = !collect_code;
527 Prologue(isolate, invoke_api_callbacks); 615 Prologue(isolate, invoke_api_callbacks);
528 // The API prologue/epilogue may create/destroy zones, so we must not 616 // The API prologue/epilogue may create/destroy zones, so we must not
529 // depend on zone allocations surviving beyond the epilogue callback. 617 // depend on zone allocations surviving beyond the epilogue callback.
530 { 618 {
531 StackZone zone(isolate); 619 StackZone zone(isolate);
532 MarkingStack marking_stack; 620 MarkingStack marking_stack;
533 MarkingVisitor mark( 621 DelaySet delay_set;
534 isolate, heap_, page_space, &marking_stack, visit_function_code); 622 const int kNumMarkers = 2;
535 IterateRoots(isolate, &mark, !invoke_api_callbacks); 623 GrowableArray<MarkingVisitor*> markers;
536 DrainMarkingStack(isolate, &mark); 624 for (int i = 0; i < kNumMarkers; ++i) {
Ivan Posva 2015/06/16 17:21:00 It would be much more readable if the DrainTask wo
koda 2015/06/16 19:41:44 Yes, but ThreadPool deletes each Task after it has
537 IterateWeakReferences(isolate, &mark); 625 markers.Add(new MarkingVisitor(
626 isolate, heap_, page_space, &marking_stack, &delay_set,
627 visit_function_code));
628 }
629 // Arbitrarily pick a visitor for the roots.
630 MarkingVisitor* roots_visitor = markers[0];
631 // No parallelism/concurrency yet, but use a separate visitor to drain the
632 // marking stack on a separate thread, just to show we can.
633 MarkingVisitor* main_visitor = markers[1];
634 IterateRoots(isolate, roots_visitor, !invoke_api_callbacks);
635 Dart::thread_pool()->Run(
636 new DrainTask(this, isolate, page_space, main_visitor));
637 // Wait for task to finish.
638 // TODO(koda): Implement counting barrier utility class and use it here.
639 {
640 MonitorLocker locker(page_space->tasks_lock());
641 while (page_space->tasks() > 1) {
642 locker.Wait();
643 }
644 }
645 IterateWeakReferences(isolate, roots_visitor);
538 MarkingWeakVisitor mark_weak; 646 MarkingWeakVisitor mark_weak;
539 IterateWeakRoots(isolate, &mark_weak, invoke_api_callbacks); 647 IterateWeakRoots(isolate, &mark_weak, invoke_api_callbacks);
540 mark.Finalize(); 648 // All marking is done.
649 for (int i = 0; i < kNumMarkers; ++i) {
650 MarkingVisitor* marker = markers.RemoveLast();
651 marker->Finalize();
652 delete marker;
653 }
654 delay_set.ClearAll();
Ivan Posva 2015/06/16 17:21:00 ClearAll is ambiguous. How about ClearReferences()
koda 2015/06/16 19:41:44 Done.
541 ProcessWeakTables(page_space); 655 ProcessWeakTables(page_space);
542 ProcessObjectIdTable(isolate); 656 ProcessObjectIdTable(isolate);
543 } 657 }
544 Epilogue(isolate, invoke_api_callbacks); 658 Epilogue(isolate, invoke_api_callbacks);
545 } 659 }
546 660
547 } // namespace dart 661 } // namespace dart
OLDNEW
« no previous file with comments | « runtime/vm/gc_marker.h ('k') | runtime/vm/raw_object.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698