Chromium Code Reviews

Diff: src/heap/mark-compact.h

Issue 2852953004: [heap] Extract marking deque to separate file. (Closed)
Patch Set: fix build.gn (created 3 years, 7 months ago)
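The patch removes the MarkingDeque class body from this header; it lives on as SequentialMarkingDeque in the new src/heap/sequential-marking-deque.h, which mark-compact.h now includes, and a type alias keeps the old name valid so existing call sites compile unchanged. Below is a tiny standalone illustration of that pattern, using stand-in names (Worklist, SequentialWorklist) rather than the real V8 types:

// Standalone illustration (stand-in names, not V8 code): code written against
// the old type name keeps compiling because the name now aliases the moved type.
#include <cassert>
#include <vector>

// Stands in for SequentialMarkingDeque, now living in its own header.
class SequentialWorklist {
 public:
  void Push(int v) { items_.push_back(v); }
  int Pop() {
    int v = items_.back();
    items_.pop_back();
    return v;
  }
  bool IsEmpty() const { return items_.empty(); }

 private:
  std::vector<int> items_;
};

// Mirrors the new line in this patch: using MarkingDeque = SequentialMarkingDeque;
using Worklist = SequentialWorklist;

int main() {
  Worklist deque;  // old name, new type; call sites stay unchanged
  deque.Push(1);
  assert(deque.Pop() == 1);
  assert(deque.IsEmpty());
  return 0;
}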
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_MARK_COMPACT_H_
 #define V8_HEAP_MARK_COMPACT_H_

 #include <deque>

 #include "src/base/bits.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/cancelable-task.h"
 #include "src/heap/marking.h"
+#include "src/heap/sequential-marking-deque.h"
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"

 namespace v8 {
 namespace internal {

 // Forward declarations.
 class CodeFlusher;
 class HeapObjectVisitor;
 class MarkCompactCollector;
 class MinorMarkCompactCollector;
 class MarkingVisitor;
 class ThreadLocalTop;

+using MarkingDeque = SequentialMarkingDeque;
+
 class ObjectMarking : public AllStatic {
  public:
   V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
                                        const MarkingState& state) {
     const Address address = obj->address();
     const MemoryChunk* p = MemoryChunk::FromAddress(address);
     return state.bitmap()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
   }

   static Marking::ObjectColor Color(HeapObject* obj,
(...skipping 63 matching lines...)
     MarkBit markbit = MarkBitFrom(obj, state);
     if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
     state.IncrementLiveBytes<access_mode>(obj->Size());
     return true;
   }

  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
 };

-// -----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class MarkingDeque {
- public:
-  explicit MarkingDeque(Heap* heap)
-      : backing_store_(nullptr),
-        backing_store_committed_size_(0),
-        array_(nullptr),
-        top_(0),
-        bottom_(0),
-        mask_(0),
-        overflowed_(false),
-        in_use_(false),
-        uncommit_task_pending_(false),
-        heap_(heap) {}
-
-  void SetUp();
-  void TearDown();
-
-  // Ensures that the marking deque is committed and will stay committed until
-  // StopUsing() is called.
-  void StartUsing();
-  void StopUsing();
-  void Clear();
-
-  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
-  inline bool IsEmpty() { return top_ == bottom_; }
-
-  bool overflowed() const { return overflowed_; }
-
-  void ClearOverflowed() { overflowed_ = false; }
-
-  void SetOverflowed() { overflowed_ = true; }
-
-  // Push the object on the marking stack if there is room, otherwise mark the
-  // deque as overflowed and wait for a rescan of the heap.
-  INLINE(bool Push(HeapObject* object)) {
-    DCHECK(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-      return false;
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-      return true;
-    }
-  }
-
-  INLINE(HeapObject* Pop()) {
-    DCHECK(!IsEmpty());
-    top_ = ((top_ - 1) & mask_);
-    HeapObject* object = array_[top_];
-    DCHECK(object->IsHeapObject());
-    return object;
-  }
-
-  // Unshift the object into the marking stack if there is room, otherwise mark
-  // the deque as overflowed and wait for a rescan of the heap.
-  INLINE(bool Unshift(HeapObject* object)) {
-    DCHECK(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-      return false;
-    } else {
-      bottom_ = ((bottom_ - 1) & mask_);
-      array_[bottom_] = object;
-      return true;
-    }
-  }
-
-  template <typename Callback>
-  void Iterate(Callback callback) {
-    int i = bottom_;
-    while (i != top_) {
-      callback(array_[i]);
-      i = (i + 1) & mask_;
-    }
-  }
-
-  HeapObject** array() { return array_; }
-  int bottom() { return bottom_; }
-  int top() { return top_; }
-  int mask() { return mask_; }
-  void set_top(int top) { top_ = top; }
-
- private:
-  // This task uncommits the marking_deque backing store if
-  // marking_deque->in_use_ is false.
-  class UncommitTask : public CancelableTask {
-   public:
-    explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
-        : CancelableTask(isolate), marking_deque_(marking_deque) {}
-
-   private:
-    // CancelableTask override.
-    void RunInternal() override {
-      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
-      if (!marking_deque_->in_use_) {
-        marking_deque_->Uncommit();
-      }
-      marking_deque_->uncommit_task_pending_ = false;
-    }
-
-    MarkingDeque* marking_deque_;
-    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
-  };
-
-  static const size_t kMaxSize = 4 * MB;
-  static const size_t kMinSize = 256 * KB;
-
-  // Must be called with mutex lock.
-  void EnsureCommitted();
-
-  // Must be called with mutex lock.
-  void Uncommit();
-
-  // Must be called with mutex lock.
-  void StartUncommitTask();
-
-  base::Mutex mutex_;
-
-  base::VirtualMemory* backing_store_;
-  size_t backing_store_committed_size_;
-  HeapObject** array_;
-  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
-  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
-  // (mod mask_ + 1).
-  int top_;
-  int bottom_;
-  int mask_;
-  bool overflowed_;
-  // in_use_ == true after taking mutex lock implies that the marking deque is
-  // committed and will stay committed at least until in_use_ == false.
-  bool in_use_;
-  bool uncommit_task_pending_;
-  Heap* heap_;
-
-  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
 // be unreachable. Code objects can be referenced in two ways:
 // - SharedFunctionInfo references unoptimized code.
 // - JSFunction references either unoptimized or optimized code.
 // We are not allowed to flush unoptimized code for functions that got
 // optimized or inlined into optimized code, because we might bailout
 // into the unoptimized code again during deoptimization.
 class CodeFlusher {
(...skipping 595 matching lines...)
   ~EvacuationScope() { collector_->set_evacuation(false); }

  private:
   MarkCompactCollector* collector_;
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_MARK_COMPACT_H_
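The removed class (now SequentialMarkingDeque) is a fixed-capacity ring buffer over a power-of-two number of slots: indices wrap with "& mask_", the deque is empty when top_ == bottom_, and full when (top_ + 1) & mask_ == bottom_, so one slot is always left unused to tell the two states apart. A minimal standalone sketch of that index arithmetic, using plain ints instead of HeapObject pointers:

// Standalone sketch (not V8 code) of the ring-buffer arithmetic used by the
// removed MarkingDeque: indices wrap with "& mask", the deque is empty when
// top == bottom, and full when (top + 1) & mask == bottom.
#include <cassert>
#include <vector>

class RingDeque {
 public:
  explicit RingDeque(int capacity) : buffer_(capacity), mask_(capacity - 1) {
    assert((capacity & (capacity - 1)) == 0);  // capacity must be a power of two
  }

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  // Mirrors MarkingDeque::Push: append at the top end, fail when full.
  bool Push(int value) {
    if (IsFull()) return false;  // the real code would also set an overflow flag
    buffer_[top_] = value;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  // Mirrors MarkingDeque::Pop: remove the most recently pushed element.
  int Pop() {
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;
    return buffer_[top_];
  }

  // Mirrors MarkingDeque::Unshift: insert at the bottom end, fail when full.
  bool Unshift(int value) {
    if (IsFull()) return false;
    bottom_ = (bottom_ - 1) & mask_;  // (0 - 1) & mask_ wraps around to mask_
    buffer_[bottom_] = value;
    return true;
  }

 private:
  std::vector<int> buffer_;
  int mask_;
  int top_ = 0;
  int bottom_ = 0;
};

int main() {
  RingDeque deque(8);                        // 8 slots, 7 usable
  for (int i = 0; i < 7; i++) assert(deque.Push(i));
  assert(deque.IsFull() && !deque.Push(7));  // eighth push would collide with bottom
  assert(deque.Pop() == 6);                  // LIFO behaviour at the top end
  assert(deque.Unshift(42));                 // bottom end grows backwards
  assert(deque.IsFull());                    // back to 7 elements
  return 0;
}

When Push or Unshift hits the full condition, the real MarkingDeque sets overflowed_ and relies on a later rescan of the heap rather than growing the buffer.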