OLD | NEW |
1 // Copyright 2017 the V8 project authors. All rights reserved. | 1 // Copyright 2017 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/concurrent-marking.h" | 5 #include "src/heap/concurrent-marking.h" |
6 | 6 |
7 #include <stack> | 7 #include <stack> |
8 #include <unordered_map> | 8 #include <unordered_map> |
9 | 9 |
| 10 #include "src/heap/concurrent-marking-deque.h" |
10 #include "src/heap/heap-inl.h" | 11 #include "src/heap/heap-inl.h" |
11 #include "src/heap/heap.h" | 12 #include "src/heap/heap.h" |
12 #include "src/heap/marking.h" | 13 #include "src/heap/marking.h" |
13 #include "src/heap/objects-visiting-inl.h" | 14 #include "src/heap/objects-visiting-inl.h" |
14 #include "src/heap/objects-visiting.h" | 15 #include "src/heap/objects-visiting.h" |
15 #include "src/isolate.h" | 16 #include "src/isolate.h" |
16 #include "src/locked-queue-inl.h" | 17 #include "src/locked-queue-inl.h" |
17 #include "src/utils-inl.h" | 18 #include "src/utils-inl.h" |
18 #include "src/utils.h" | 19 #include "src/utils.h" |
19 #include "src/v8.h" | 20 #include "src/v8.h" |
20 | 21 |
21 namespace v8 { | 22 namespace v8 { |
22 namespace internal { | 23 namespace internal { |
23 | 24 |
24 class ConcurrentMarkingMarkbits { | |
25 public: | |
26 ConcurrentMarkingMarkbits() {} | |
27 ~ConcurrentMarkingMarkbits() { | |
28 for (auto chunk_bitmap : bitmap_) { | |
29 FreeBitmap(chunk_bitmap.second); | |
30 } | |
31 } | |
32 bool Mark(HeapObject* obj) { | |
33 Address address = obj->address(); | |
34 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | |
35 if (bitmap_.count(chunk) == 0) { | |
36 bitmap_[chunk] = AllocateBitmap(); | |
37 } | |
38 MarkBit mark_bit = | |
39 bitmap_[chunk]->MarkBitFromIndex(chunk->AddressToMarkbitIndex(address)); | |
40 if (mark_bit.Get()) return false; | |
41 mark_bit.Set(); | |
42 return true; | |
43 } | |
44 | |
45 Bitmap* AllocateBitmap() { | |
46 return static_cast<Bitmap*>(calloc(1, Bitmap::kSize)); | |
47 } | |
48 | |
49 void FreeBitmap(Bitmap* bitmap) { free(bitmap); } | |
50 | |
51 private: | |
52 std::unordered_map<MemoryChunk*, Bitmap*> bitmap_; | |
53 }; | |
54 | |
55 class ConcurrentMarkingVisitor final | 25 class ConcurrentMarkingVisitor final |
56 : public HeapVisitor<int, ConcurrentMarkingVisitor> { | 26 : public HeapVisitor<int, ConcurrentMarkingVisitor> { |
57 public: | 27 public: |
58 using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>; | 28 using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>; |
59 | 29 |
60 ConcurrentMarkingVisitor() : bytes_marked_(0) {} | 30 explicit ConcurrentMarkingVisitor(ConcurrentMarkingDeque* deque) |
| 31 : deque_(deque) {} |
61 | 32 |
62 void VisitPointers(HeapObject* host, Object** start, Object** end) override { | 33 void VisitPointers(HeapObject* host, Object** start, Object** end) override { |
63 for (Object** p = start; p < end; p++) { | 34 for (Object** p = start; p < end; p++) { |
64 if (!(*p)->IsHeapObject()) continue; | 35 if (!(*p)->IsHeapObject()) continue; |
65 MarkObject(HeapObject::cast(*p)); | 36 MarkObject(HeapObject::cast(*p)); |
66 } | 37 } |
67 } | 38 } |
68 | 39 |
69 // =========================================================================== | 40 // =========================================================================== |
70 // JS object ================================================================= | 41 // JS object ================================================================= |
(...skipping 76 matching lines...)
147 return 0; | 118 return 0; |
148 } | 119 } |
149 | 120 |
150 int VisitJSWeakCollection(Map* map, JSWeakCollection* object) override { | 121 int VisitJSWeakCollection(Map* map, JSWeakCollection* object) override { |
151 // TODO(ulan): implement iteration of strong fields and push the object to | 122 // TODO(ulan): implement iteration of strong fields and push the object to |
152 // the bailout deque. | 123 // the bailout deque. |
153 return 0; | 124 return 0; |
154 } | 125 } |
155 | 126 |
156 void MarkObject(HeapObject* obj) { | 127 void MarkObject(HeapObject* obj) { |
157 if (markbits_.Mark(obj)) { | 128 deque_->Push(obj, MarkingThread::kConcurrent, TargetDeque::kShared); |
158 marking_stack_.push(obj); | |
159 } | |
160 } | 129 } |
161 | 130 |
162 void MarkTransitively() { | |
163 while (!marking_stack_.empty()) { | |
164 HeapObject* obj = marking_stack_.top(); | |
165 marking_stack_.pop(); | |
166 bytes_marked_ += IterateBody(obj); | |
167 } | |
168 } | |
169 | |
170 size_t bytes_marked() { return bytes_marked_; } | |
171 | |
172 private: | 131 private: |
173 size_t bytes_marked_; | 132 ConcurrentMarkingDeque* deque_; |
174 std::stack<HeapObject*> marking_stack_; | |
175 ConcurrentMarkingMarkbits markbits_; | |
176 }; | 133 }; |
177 | 134 |
178 class ConcurrentMarking::Task : public CancelableTask { | 135 class ConcurrentMarking::Task : public CancelableTask { |
179 public: | 136 public: |
180 Task(Heap* heap, std::vector<HeapObject*>* root_set, | 137 Task(Isolate* isolate, ConcurrentMarking* concurrent_marking, |
181 base::Semaphore* on_finish) | 138 base::Semaphore* on_finish) |
182 : CancelableTask(heap->isolate()), | 139 : CancelableTask(isolate), |
183 heap_(heap), | 140 concurrent_marking_(concurrent_marking), |
184 on_finish_(on_finish), | 141 on_finish_(on_finish) {} |
185 root_set_(root_set) {} | |
186 | 142 |
187 virtual ~Task() {} | 143 virtual ~Task() {} |
188 | 144 |
189 private: | 145 private: |
190 // v8::internal::CancelableTask overrides. | 146 // v8::internal::CancelableTask overrides. |
191 void RunInternal() override { | 147 void RunInternal() override { |
192 double time_ms = heap_->MonotonicallyIncreasingTimeInMs(); | 148 concurrent_marking_->Run(); |
193 { | |
194 TimedScope scope(&time_ms); | |
195 for (HeapObject* obj : *root_set_) { | |
196 marking_visitor_.MarkObject(obj); | |
197 } | |
198 marking_visitor_.MarkTransitively(); | |
199 } | |
200 if (FLAG_trace_concurrent_marking) { | |
201 heap_->isolate()->PrintWithTimestamp( | |
202 "concurrently marked %dKB in %.2fms\n", | |
203 static_cast<int>(marking_visitor_.bytes_marked() / KB), time_ms); | |
204 } | |
205 on_finish_->Signal(); | 149 on_finish_->Signal(); |
206 } | 150 } |
207 | 151 |
208 Heap* heap_; | 152 ConcurrentMarking* concurrent_marking_; |
209 base::Semaphore* on_finish_; | 153 base::Semaphore* on_finish_; |
210 ConcurrentMarkingVisitor marking_visitor_; | |
211 std::vector<HeapObject*>* root_set_; | |
212 DISALLOW_COPY_AND_ASSIGN(Task); | 154 DISALLOW_COPY_AND_ASSIGN(Task); |
213 }; | 155 }; |
214 | 156 |
215 ConcurrentMarking::ConcurrentMarking(Heap* heap) | 157 ConcurrentMarking::ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque) |
216 : heap_(heap), pending_task_semaphore_(0), is_task_pending_(false) { | 158 : heap_(heap), |
| 159 pending_task_semaphore_(0), |
| 160 deque_(deque), |
| 161 visitor_(new ConcurrentMarkingVisitor(deque_)), |
| 162 is_task_pending_(false) { |
217 // Concurrent marking does not work with double unboxing. | 163 // Concurrent marking does not work with double unboxing. |
218 STATIC_ASSERT(!(V8_CONCURRENT_MARKING && V8_DOUBLE_FIELDS_UNBOXING)); | 164 STATIC_ASSERT(!(V8_CONCURRENT_MARKING && V8_DOUBLE_FIELDS_UNBOXING)); |
219 // The runtime flag should be set only if the compile time flag was set. | 165 // The runtime flag should be set only if the compile time flag was set. |
220 CHECK(!FLAG_concurrent_marking || V8_CONCURRENT_MARKING); | 166 CHECK(!FLAG_concurrent_marking || V8_CONCURRENT_MARKING); |
221 } | 167 } |
222 | 168 |
223 ConcurrentMarking::~ConcurrentMarking() {} | 169 ConcurrentMarking::~ConcurrentMarking() { delete visitor_; } |
224 | 170 |
225 void ConcurrentMarking::AddRoot(HeapObject* object) { | 171 void ConcurrentMarking::Run() { |
226 root_set_.push_back(object); | 172 double time_ms = heap_->MonotonicallyIncreasingTimeInMs(); |
| 173 size_t bytes_marked = 0; |
| 174 { |
| 175 TimedScope scope(&time_ms); |
| 176 HeapObject* object; |
| 177 while ((object = deque_->Pop(MarkingThread::kConcurrent)) != nullptr) { |
| 178 bytes_marked += visitor_->IterateBody(object); |
| 179 } |
| 180 } |
| 181 if (FLAG_trace_concurrent_marking) { |
| 182 heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n", |
| 183 static_cast<int>(bytes_marked / KB), |
| 184 time_ms); |
| 185 } |
227 } | 186 } |
228 | 187 |
229 void ConcurrentMarking::StartTask() { | 188 void ConcurrentMarking::StartTask() { |
230 if (!FLAG_concurrent_marking) return; | 189 if (!FLAG_concurrent_marking) return; |
231 is_task_pending_ = true; | 190 is_task_pending_ = true; |
232 | |
233 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 191 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
234 new Task(heap_, &root_set_, &pending_task_semaphore_), | 192 new Task(heap_->isolate(), this, &pending_task_semaphore_), |
235 v8::Platform::kShortRunningTask); | 193 v8::Platform::kShortRunningTask); |
236 } | 194 } |
237 | 195 |
238 void ConcurrentMarking::WaitForTaskToComplete() { | 196 void ConcurrentMarking::WaitForTaskToComplete() { |
239 if (!FLAG_concurrent_marking) return; | 197 if (!FLAG_concurrent_marking) return; |
240 pending_task_semaphore_.Wait(); | 198 pending_task_semaphore_.Wait(); |
241 is_task_pending_ = false; | 199 is_task_pending_ = false; |
242 root_set_.clear(); | |
243 } | 200 } |
244 | 201 |
245 void ConcurrentMarking::EnsureTaskCompleted() { | 202 void ConcurrentMarking::EnsureTaskCompleted() { |
246 if (IsTaskPending()) { | 203 if (IsTaskPending()) { |
247 WaitForTaskToComplete(); | 204 WaitForTaskToComplete(); |
248 } | 205 } |
249 } | 206 } |
250 | 207 |
251 } // namespace internal | 208 } // namespace internal |
252 } // namespace v8 | 209 } // namespace v8 |
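
For context, this CL replaces the visitor's private mark bitmap and local marking stack with a shared ConcurrentMarkingDeque: the main thread publishes gray objects into the deque, the background Task drains it in Run(), and a semaphore lets the main thread wait for completion. Below is a standalone sketch of that coordination pattern in plain C++ with no V8 types; SharedDeque and Semaphore are stand-ins for illustration only, not the classes used in the CL.

// Standalone sketch (not V8 code): a main thread publishes work into a shared
// deque, a background task drains it and signals a semaphore on completion.
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

class SharedDeque {
 public:
  void Push(int obj) {
    std::lock_guard<std::mutex> lock(mutex_);
    deque_.push_back(obj);
  }
  bool Pop(int* obj) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (deque_.empty()) return false;
    *obj = deque_.front();
    deque_.pop_front();
    return true;
  }

 private:
  std::mutex mutex_;
  std::deque<int> deque_;
};

class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

int main() {
  SharedDeque deque;
  Semaphore on_finish;
  // "Roots" published up front by the main thread.
  for (int i = 0; i < 100; i++) deque.Push(i);

  // Background "Task::RunInternal": drain the shared deque, then signal.
  std::thread task([&] {
    int obj, processed = 0;
    while (deque.Pop(&obj)) processed++;  // stand-in for visitor_->IterateBody()
    std::printf("concurrently processed %d objects\n", processed);
    on_finish.Signal();
  });

  on_finish.Wait();  // analogous to WaitForTaskToComplete()
  task.join();
  return 0;
}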