OLD | NEW |
1 // Copyright 2017 the V8 project authors. All rights reserved. | 1 // Copyright 2017 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/concurrent-marking.h" | 5 #include "src/heap/concurrent-marking.h" |
6 | 6 |
| 7 #include <stack> |
| 8 #include <unordered_map> |
| 9 |
7 #include "src/heap/heap-inl.h" | 10 #include "src/heap/heap-inl.h" |
8 #include "src/heap/heap.h" | 11 #include "src/heap/heap.h" |
| 12 #include "src/heap/marking.h" |
9 #include "src/isolate.h" | 13 #include "src/isolate.h" |
10 #include "src/locked-queue-inl.h" | 14 #include "src/locked-queue-inl.h" |
11 #include "src/v8.h" | 15 #include "src/v8.h" |
12 | 16 |
13 namespace v8 { | 17 namespace v8 { |
14 namespace internal { | 18 namespace internal { |
15 | 19 |
| 20 class ConcurrentMarkingMarkbits { |
| 21 public: |
| 22 ConcurrentMarkingMarkbits() {} |
| 23 ~ConcurrentMarkingMarkbits() { |
| 24 for (auto chunk_bitmap : bitmap_) { |
| 25 FreeBitmap(chunk_bitmap.second); |
| 26 } |
| 27 } |
| 28 bool Mark(HeapObject* obj) { |
| 29 Address address = obj->address(); |
| 30 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 31 if (bitmap_.count(chunk) == 0) { |
| 32 bitmap_[chunk] = AllocateBitmap(); |
| 33 } |
| 34 MarkBit mark_bit = |
| 35 bitmap_[chunk]->MarkBitFromIndex(chunk->AddressToMarkbitIndex(address)); |
| 36 if (mark_bit.Get()) return false; |
| 37 mark_bit.Set(); |
| 38 return true; |
| 39 } |
| 40 |
| 41 Bitmap* AllocateBitmap() { |
| 42 return static_cast<Bitmap*>(calloc(1, Bitmap::kSize)); |
| 43 } |
| 44 |
| 45 void FreeBitmap(Bitmap* bitmap) { free(bitmap); } |
| 46 |
| 47 private: |
| 48 std::unordered_map<MemoryChunk*, Bitmap*> bitmap_; |
| 49 }; |
| 50 |
| 51 class ConcurrentMarkingVisitor : public ObjectVisitor { |
| 52 public: |
| 53 ConcurrentMarkingVisitor() {} |
| 54 |
| 55 void VisitPointers(Object** start, Object** end) override { |
| 56 for (Object** p = start; p < end; p++) { |
| 57 if (!(*p)->IsHeapObject()) continue; |
| 58 MarkObject(HeapObject::cast(*p)); |
| 59 } |
| 60 } |
| 61 |
| 62 void MarkObject(HeapObject* obj) { |
| 63 if (markbits_.Mark(obj)) { |
| 64 marking_stack_.push(obj); |
| 65 } |
| 66 } |
| 67 |
| 68 void MarkTransitively() { |
| 69 while (!marking_stack_.empty()) { |
| 70 HeapObject* obj = marking_stack_.top(); |
| 71 marking_stack_.pop(); |
| 72 obj->Iterate(this); |
| 73 } |
| 74 } |
| 75 |
| 76 private: |
| 77 std::stack<HeapObject*> marking_stack_; |
| 78 ConcurrentMarkingMarkbits markbits_; |
| 79 }; |
| 80 |
16 class ConcurrentMarking::Task : public CancelableTask { | 81 class ConcurrentMarking::Task : public CancelableTask { |
17 public: | 82 public: |
18 Task(Heap* heap, Queue* queue, base::Semaphore* on_finish) | 83 Task(Heap* heap, std::vector<HeapObject*>* root_set, |
| 84 base::Semaphore* on_finish) |
19 : CancelableTask(heap->isolate()), | 85 : CancelableTask(heap->isolate()), |
20 heap_(heap), | 86 heap_(heap), |
21 queue_(queue), | 87 on_finish_(on_finish), |
22 on_finish_(on_finish) {} | 88 root_set_(root_set) {} |
23 | 89 |
24 virtual ~Task() {} | 90 virtual ~Task() {} |
25 | 91 |
26 private: | 92 private: |
27 // v8::internal::CancelableTask overrides. | 93 // v8::internal::CancelableTask overrides. |
28 void RunInternal() override { | 94 void RunInternal() override { |
29 USE(heap_); | 95 USE(heap_); |
30 HeapObject* object; | 96 for (HeapObject* obj : *root_set_) { |
31 while (queue_->Dequeue(&object)) { | 97 marking_visitor_.MarkObject(obj); |
32 // TODO(ulan): Implement actual marking. | |
33 } | 98 } |
| 99 marking_visitor_.MarkTransitively(); |
34 on_finish_->Signal(); | 100 on_finish_->Signal(); |
35 } | 101 } |
36 | 102 |
37 Heap* heap_; | 103 Heap* heap_; |
38 Queue* queue_; | |
39 base::Semaphore* on_finish_; | 104 base::Semaphore* on_finish_; |
| 105 ConcurrentMarkingVisitor marking_visitor_; |
| 106 std::vector<HeapObject*>* root_set_; |
40 DISALLOW_COPY_AND_ASSIGN(Task); | 107 DISALLOW_COPY_AND_ASSIGN(Task); |
41 }; | 108 }; |
42 | 109 |
43 ConcurrentMarking::ConcurrentMarking(Heap* heap) | 110 ConcurrentMarking::ConcurrentMarking(Heap* heap) |
44 : heap_(heap), pending_tasks_(0), number_of_tasks_(0) {} | 111 : heap_(heap), pending_task_(0) {} |
45 | 112 |
46 ConcurrentMarking::~ConcurrentMarking() {} | 113 ConcurrentMarking::~ConcurrentMarking() {} |
47 | 114 |
48 void ConcurrentMarking::EnqueueObject(HeapObject* object) { | 115 void ConcurrentMarking::AddRoot(HeapObject* object) { |
49 queue_.Enqueue(object); | 116 root_set_.push_back(object); |
50 } | 117 } |
51 | 118 |
52 bool ConcurrentMarking::IsQueueEmpty() { return queue_.IsEmpty(); } | 119 void ConcurrentMarking::StartMarkingTask() { |
| 120 if (!FLAG_concurrent_marking) return; |
53 | 121 |
54 void ConcurrentMarking::StartMarkingTasks(int number_of_tasks) { | 122 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
55 if (!FLAG_concurrent_marking) return; | 123 new Task(heap_, &root_set_, &pending_task_), |
56 DCHECK_EQ(0, number_of_tasks_); | 124 v8::Platform::kShortRunningTask); |
57 | |
58 number_of_tasks_ = number_of_tasks; | |
59 for (int i = 0; i < number_of_tasks; i++) { | |
60 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
61 new Task(heap_, &queue_, &pending_tasks_), | |
62 v8::Platform::kShortRunningTask); | |
63 } | |
64 } | 125 } |
65 | 126 |
66 void ConcurrentMarking::WaitForTasksToComplete() { | 127 void ConcurrentMarking::WaitForTaskToComplete() { |
67 if (!FLAG_concurrent_marking) return; | 128 if (!FLAG_concurrent_marking) return; |
68 | 129 pending_task_.Wait(); |
69 CancelableTaskManager* cancelable_task_manager = | |
70 heap_->isolate()->cancelable_task_manager(); | |
71 for (int i = 0; i < number_of_tasks_; i++) { | |
72 if (cancelable_task_manager->TryAbort(task_ids_[i]) != | |
73 CancelableTaskManager::kTaskAborted) { | |
74 pending_tasks_.Wait(); | |
75 } | |
76 } | |
77 number_of_tasks_ = 0; | |
78 } | 130 } |
79 | 131 |
80 } // namespace internal | 132 } // namespace internal |
81 } // namespace v8 | 133 } // namespace v8 |
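
The two new classes at the top of the file carry the actual marking. ConcurrentMarkingMarkbits keeps mark bits in a side table: it allocates a zeroed bitmap per MemoryChunk on first touch, and Mark() returns true only for the first marking of an object. ConcurrentMarkingVisitor pushes an object onto its mark stack only when that first marking succeeds, so MarkTransitively() terminates even on cyclic object graphs. Below is a minimal standalone sketch of the same first-mark-wins pattern; Node, MarkingContext, and kChunkSize are illustrative stand-ins for V8's HeapObject, the two new classes, and the MemoryChunk granularity, not V8 API.

// Sketch of the diff's marking scheme: per-chunk mark bits allocated on
// first touch, plus a mark stack that drains the transitive closure.
#include <cstdint>
#include <stack>
#include <unordered_map>
#include <vector>

struct Node {
  std::vector<Node*> children;
};

class MarkingContext {
 public:
  // Returns true only for the first marking of |node|, mirroring
  // ConcurrentMarkingMarkbits::Mark.
  bool Mark(Node* node) {
    uintptr_t address = reinterpret_cast<uintptr_t>(node);
    uintptr_t chunk = address / kChunkSize;     // stand-in for MemoryChunk
    std::vector<bool>& bits = bitmap_[chunk];   // side table, on demand
    if (bits.empty()) bits.resize(kChunkSize);  // zero-filled, like calloc
    size_t index = static_cast<size_t>(address % kChunkSize);
    if (bits[index]) return false;
    bits[index] = true;
    return true;
  }

  // Push only on the first marking, as in
  // ConcurrentMarkingVisitor::MarkObject.
  void MarkObject(Node* node) {
    if (Mark(node)) stack_.push(node);
  }

  // Drain the stack; terminates because Mark() succeeds at most once per
  // node, as in ConcurrentMarkingVisitor::MarkTransitively.
  void MarkTransitively() {
    while (!stack_.empty()) {
      Node* node = stack_.top();
      stack_.pop();
      for (Node* child : node->children) MarkObject(child);
    }
  }

 private:
  static constexpr uintptr_t kChunkSize = 1 << 18;  // 256 KB, page-sized
  std::unordered_map<uintptr_t, std::vector<bool>> bitmap_;
  std::stack<Node*> stack_;
};

int main() {
  Node a, b, c;
  a.children = {&b, &c};
  b.children = {&c, &a};  // a cycle: the bit test breaks it
  MarkingContext marking;
  marking.MarkObject(&a);
  marking.MarkTransitively();  // visits a, b, c exactly once
}

Unlike V8's Bitmap, the sketch spends one bit per byte of chunk address space; what it preserves is the handshake between the bit table and the stack that keeps each object on the stack at most once.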
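The scheduling change is equally simple: the multi-task setup over a locked queue becomes a single Task over a shared root_set_ vector, and the TryAbort bookkeeping in WaitForTasksToComplete disappears. What remains is one semaphore handoff: StartMarkingTask posts the task, RunInternal signals on_finish_ after MarkTransitively() drains, and WaitForTaskToComplete blocks on the same semaphore. That makes the semaphore the only synchronization, so root_set_ must not be touched between StartMarkingTask() and WaitForTaskToComplete(). A sketch of the handoff, with std::thread and C++20 std::binary_semaphore standing in for the V8 platform's background thread and base::Semaphore (Root and RunMarkingTask are illustrative names):

// The Task/semaphore handoff from the diff, outside V8.
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

struct Root { int payload; };

// Mirrors Task::RunInternal: visit every root, then signal completion.
void RunMarkingTask(const std::vector<Root*>* root_set,
                    std::binary_semaphore* on_finish) {
  for (Root* root : *root_set) {
    std::printf("marking root %d\n", root->payload);  // marking stand-in
  }
  on_finish->release();  // the diff's on_finish_->Signal()
}

int main() {
  std::binary_semaphore pending_task(0);  // pending_task_(0) in the diff
  std::vector<Root*> root_set;            // filled by AddRoot beforehand
  Root r1{1}, r2{2};
  root_set.push_back(&r1);
  root_set.push_back(&r2);

  // StartMarkingTask: hand the root set and the semaphore to a background
  // thread; the vector must stay untouched until the wait below.
  std::thread task(RunMarkingTask, &root_set, &pending_task);

  pending_task.acquire();  // WaitForTaskToComplete: pending_task_.Wait()
  task.join();
}

The one-to-one pairing also explains the rename from pending_tasks_ to pending_task_: with exactly one task in flight, one Signal() matches one Wait().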