OLD | NEW |
---|---|
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #ifndef RUNTIME_VM_SCAVENGER_H_ | 5 #ifndef RUNTIME_VM_SCAVENGER_H_ |
6 #define RUNTIME_VM_SCAVENGER_H_ | 6 #define RUNTIME_VM_SCAVENGER_H_ |
7 | 7 |
8 #include "platform/assert.h" | 8 #include "platform/assert.h" |
9 #include "platform/utils.h" | 9 #include "platform/utils.h" |
10 #include "vm/dart.h" | 10 #include "vm/dart.h" |
(...skipping 108 matching lines...)
119 // During scavenging both the to and from spaces contain "legal" objects. | 119 // During scavenging both the to and from spaces contain "legal" objects. |
120 // During a scavenge this function only returns true for addresses that will | 120 // During a scavenge this function only returns true for addresses that will |
121 // be part of the surviving objects. | 121 // be part of the surviving objects. |
122 bool Contains(uword addr) const { return to_->Contains(addr); } | 122 bool Contains(uword addr) const { return to_->Contains(addr); } |
123 | 123 |
124 RawObject* FindObject(FindObjectVisitor* visitor) const; | 124 RawObject* FindObject(FindObjectVisitor* visitor) const; |
125 | 125 |
126 uword TryAllocate(intptr_t size) { | 126 uword TryAllocate(intptr_t size) { |
127 ASSERT(Utils::IsAligned(size, kObjectAlignment)); | 127 ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
128 ASSERT(heap_ != Dart::vm_isolate()->heap()); | 128 ASSERT(heap_ != Dart::vm_isolate()->heap()); |
129 | |
129 #if defined(DEBUG) | 130 #if defined(DEBUG) |
130 if (FLAG_gc_at_alloc && !scavenging_) { | 131 if (FLAG_gc_at_alloc && !scavenging_) { |
131 Scavenge(); | 132 Scavenge(); |
132 } | 133 } |
133 #endif | 134 #endif |
134 uword result = top_; | 135 |
135 intptr_t remaining = end_ - top_; | 136 Thread* thread = Thread::Current(); |
rmacnak 2017/07/05 17:39:53: This will hurt DBC as TLS is expensive. Pass in th…
danunez 2017/07/05 18:12:55: I will do this as part of a separate function, as …
136 if (remaining < size) { | 137 uword top = 0; |
138 uword end = 0; | |
139 | |
140 if (!thread->IsMutatorThread()) { | |
141 thread = Isolate::Current()->mutator_thread(); | |
rmacnak 2017/07/05 17:39:53: We cannot access state on another Thread outside o…
danunez 2017/07/05 18:12:56: Can't the background thread trigger a full heap GC…
142 } | |
143 | |
144 if (thread->heap() != NULL) { | |
rmacnak 2017/07/05 17:39:53: This shouldn't be possible unless we haven't corre…
danunez 2017/07/05 18:12:56: This happens if the mutator is unscheduled, but th…
145 top = thread->top(); | |
146 end = thread->end(); | |
147 } else { | |
148 top = top_; | |
149 end = end_; | |
150 } | |
151 | |
152 uword result = top; | |
153 intptr_t remaining = end - top; | |
154 if ((remaining < size) || (CapacityInWords() == 0)) { | |
rmacnak 2017/07/05 17:39:53: Why is the second check needed? Only the VM isolat…
danunez 2017/07/05 18:12:56: Correct. There is exactly one test that checks the…
137 return 0; | 155 return 0; |
138 } | 156 } |
139 ASSERT(to_->Contains(result)); | 157 ASSERT(to_->Contains(result)); |
140 ASSERT((result & kObjectAlignmentMask) == object_alignment_); | 158 ASSERT((result & kObjectAlignmentMask) == object_alignment_); |
141 | 159 |
142 top_ += size; | 160 top += size; |
143 ASSERT(to_->Contains(top_) || (top_ == to_->end())); | 161 ASSERT(to_->Contains(top) || (top == to_->end())); |
162 | |
163 if (thread->heap() != NULL) { | |
164 thread->set_top_offset(top); | |
165 } else { | |
166 top_ = top; | |
167 } | |
168 // We only want to change top_ if mutator is scheduled and therefore | |
169 // has a heap attached. | |
170 | |
171 | |
144 return result; | 172 return result; |
145 } | 173 } |
146 | 174 |
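The new TryAllocate above bump-allocates from a top/end pair that this CL reads from the mutator Thread when one is attached, falling back to the Scavenger's own top_/end_ otherwise. Below is a minimal sketch of that bump-pointer scheme, assuming hypothetical ThreadLike/SpaceLike stand-ins rather than the VM's real Thread and Scavenger classes:

```cpp
#include <cstdint>

typedef uintptr_t uword;

// Hypothetical stand-in for the VM's Thread: caches the allocation window.
struct ThreadLike {
  uword top = 0;  // next free address in the cached window
  uword end = 0;  // first address past the window; 0 means no heap attached
};

// Hypothetical stand-in for the Scavenger's fallback state.
struct SpaceLike {
  uword top_;  // fallback window, used when the mutator is unscheduled
  uword end_;

  uword TryAllocate(ThreadLike* thread, intptr_t size) {
    // Prefer the thread-local window (the thread->heap() != NULL case in
    // the CL); otherwise fall back to the space's own fields.
    uword* top = (thread->end != 0) ? &thread->top : &top_;
    uword* end = (thread->end != 0) ? &thread->end : &end_;
    if (*end - *top < static_cast<uword>(size)) {
      return 0;  // window exhausted; the caller must scavenge and retry
    }
    uword result = *top;
    *top += size;  // bump the cursor past the newly allocated object
    return result;
  }
};

int main() {
  alignas(8) static char buffer[256];
  ThreadLike thread;
  thread.top = reinterpret_cast<uword>(buffer);
  thread.end = reinterpret_cast<uword>(buffer + sizeof(buffer));
  SpaceLike space = {0, 0};
  return space.TryAllocate(&thread, 32) != 0 ? 0 : 1;  // fits in the window
}
```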
147 // Collect the garbage in this scavenger. | 175 // Collect the garbage in this scavenger. |
148 void Scavenge(); | 176 void Scavenge(); |
149 void Scavenge(bool invoke_api_callbacks); | 177 void Scavenge(bool invoke_api_callbacks); |
150 | 178 |
151 // Promote all live objects. | 179 // Promote all live objects. |
152 void Evacuate() { | 180 void Evacuate() { |
153 Scavenge(); | 181 Scavenge(); |
154 Scavenge(); | 182 Scavenge(); |
155 ASSERT(UsedInWords() == 0); | 183 ASSERT(UsedInWords() == 0); |
156 } | 184 } |
157 | 185 |
158 // Accessors to generate code for inlined allocation. | 186 // Accessors to generate code for inlined allocation. |
159 uword* TopAddress() { return &top_; } | 187 uword* TopAddress() { return &top_; } |
160 uword* EndAddress() { return &end_; } | 188 uword* EndAddress() { return &end_; } |
161 static intptr_t top_offset() { return OFFSET_OF(Scavenger, top_); } | |
162 static intptr_t end_offset() { return OFFSET_OF(Scavenger, end_); } | |
163 | 189 |
164 int64_t UsedInWords() const { | 190 int64_t UsedInWords() const { |
165 return (top_ - FirstObjectStart()) >> kWordSizeLog2; | 191 return (top_ - FirstObjectStart()) >> kWordSizeLog2; |
166 } | 192 } |
167 int64_t CapacityInWords() const { return to_->size_in_words(); } | 193 int64_t CapacityInWords() const { return to_->size_in_words(); } |
168 int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; } | 194 int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; } |
169 SpaceUsage GetCurrentUsage() const { | 195 SpaceUsage GetCurrentUsage() const { |
170 SpaceUsage usage; | 196 SpaceUsage usage; |
171 usage.used_in_words = UsedInWords(); | 197 usage.used_in_words = UsedInWords(); |
172 usage.capacity_in_words = CapacityInWords(); | 198 usage.capacity_in_words = CapacityInWords(); |
(...skipping 53 matching lines...)
226 uword ProcessWeakProperty(RawWeakProperty* raw_weak, | 252 uword ProcessWeakProperty(RawWeakProperty* raw_weak, |
227 ScavengerVisitor* visitor); | 253 ScavengerVisitor* visitor); |
228 void Epilogue(Isolate* isolate, SemiSpace* from, bool invoke_api_callbacks); | 254 void Epilogue(Isolate* isolate, SemiSpace* from, bool invoke_api_callbacks); |
229 | 255 |
230 bool IsUnreachable(RawObject** p); | 256 bool IsUnreachable(RawObject** p); |
231 | 257 |
232 // During a scavenge we need to remember the promoted objects. | 258 // During a scavenge we need to remember the promoted objects. |
233 // This is implemented as a stack of objects at the end of the to space. As | 259 // This is implemented as a stack of objects at the end of the to space. As |
234 // object sizes are always greater than sizeof(uword) and promoted objects do | 260 // object sizes are always greater than sizeof(uword) and promoted objects do |
235 // not consume space in the to space they leave enough room for this stack. | 261 // not consume space in the to space they leave enough room for this stack. |
236 void PushToPromotedStack(uword addr) { | 262 void PushToPromotedStack(uword addr) { |
rmacnak 2017/07/05 17:39:53: Promo stack: During a scavenge, we should be usin…
danunez 2017/07/05 18:12:56: I do not recall why I changed this to use TLS. I w…
237 ASSERT(scavenging_); | 263 ASSERT(scavenging_); |
238 end_ -= sizeof(addr); | 264 |
239 ASSERT(end_ > top_); | 265 uword top = 0; |
240 *reinterpret_cast<uword*>(end_) = addr; | 266 uword end = 0; |
267 Thread* thread = Thread::Current(); | |
268 if (!thread->IsMutatorThread()) { | |
269 thread = Isolate::Current()->mutator_thread(); | |
270 } | |
271 | |
272 if (thread->heap() != NULL) { | |
273 top = thread->top(); | |
274 end = thread->end(); | |
275 } else { | |
276 top = top_; | |
277 end = end_; | |
278 } | |
279 | |
280 end -= sizeof(addr); | |
281 | |
282 if (thread->heap() != NULL) { | |
283 thread->set_end_offset(end); | |
284 } else { | |
285 end_ = end; | |
286 } | |
287 | |
288 ASSERT(end > top); | |
289 *reinterpret_cast<uword*>(end) = addr; | |
241 } | 290 } |
242 uword PopFromPromotedStack() { | 291 uword PopFromPromotedStack() { |
243 ASSERT(scavenging_); | 292 ASSERT(scavenging_); |
244 uword result = *reinterpret_cast<uword*>(end_); | 293 |
245 end_ += sizeof(result); | 294 uword end = 0; |
246 ASSERT(end_ <= to_->end()); | 295 Thread* thread = Thread::Current(); |
296 if (!thread->IsMutatorThread()) { | |
297 thread = Isolate::Current()->mutator_thread(); | |
298 } | |
299 | |
300 if (thread->heap() != NULL) { | |
301 end = thread->end(); | |
302 } else { | |
303 end = end_; | |
304 } | |
305 | |
306 uword result = *reinterpret_cast<uword*>(end); | |
307 end += sizeof(result); | |
308 | |
309 if (thread->heap() != NULL) { | |
310 thread->set_end_offset(end); | |
311 } else { | |
312 end_ = end; | |
313 } | |
314 | |
315 ASSERT(end <= to_->end()); | |
247 return result; | 316 return result; |
248 } | 317 } |
249 bool PromotedStackHasMore() const { | 318 bool PromotedStackHasMore() const { |
250 ASSERT(scavenging_); | 319 ASSERT(scavenging_); |
251 return end_ < to_->end(); | 320 uword end = 0; |
321 Thread* thread = Thread::Current(); | |
322 if (!thread->IsMutatorThread()) { | |
323 thread = Isolate::Current()->mutator_thread(); | |
324 } | |
325 | |
326 if (thread->heap() != NULL) { | |
327 end = thread->end(); | |
328 } else { | |
329 end = end_; | |
330 } | |
331 | |
332 return end < to_->end(); | |
252 } | 333 } |
253 | 334 |
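The promoted-object stack used by PushToPromotedStack / PopFromPromotedStack above can be made concrete with a small runnable sketch; PromoStackDemo and its backing array are hypothetical, illustrating only the layout in which allocation grows up from the bottom of to-space while the stack of promoted addresses grows down from its end:

```cpp
#include <cassert>
#include <cstdint>

typedef uintptr_t uword;

// Hypothetical demo of the shared-space layout. The stack fits because
// promoted objects consume no to-space of their own.
struct PromoStackDemo {
  static const int kWords = 64;
  uword words[kWords];  // stands in for the to-space semispace
  uword top;            // allocation cursor (grows up, like top_)
  uword end;            // promo-stack cursor (grows down, like end_)

  PromoStackDemo() {
    top = reinterpret_cast<uword>(&words[0]);
    end = SpaceEnd();
  }
  uword SpaceEnd() const {
    return reinterpret_cast<uword>(&words[0]) + sizeof(words);
  }

  void Push(uword addr) {
    end -= sizeof(addr);
    assert(end > top);  // the stack must never collide with allocation
    *reinterpret_cast<uword*>(end) = addr;
  }
  uword Pop() {
    assert(HasMore());
    uword result = *reinterpret_cast<uword*>(end);
    end += sizeof(result);
    return result;
  }
  bool HasMore() const { return end < SpaceEnd(); }
};

int main() {
  PromoStackDemo space;
  space.Push(0x1000);
  space.Push(0x2000);
  assert(space.Pop() == 0x2000);  // LIFO: last promoted, first drained
  assert(space.Pop() == 0x1000);
  return space.HasMore() ? 1 : 0;
}
```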
254 void UpdateMaxHeapCapacity(); | 335 void UpdateMaxHeapCapacity(); |
255 void UpdateMaxHeapUsage(); | 336 void UpdateMaxHeapUsage(); |
256 | 337 |
257 void ProcessWeakReferences(); | 338 void ProcessWeakReferences(); |
258 | 339 |
259 intptr_t NewSizeInWords(intptr_t old_size_in_words) const; | 340 intptr_t NewSizeInWords(intptr_t old_size_in_words) const; |
260 | 341 |
261 // Accessed from generated code. | 342 // Accessed from generated code. |
(...skipping 28 matching lines...)
290 RawWeakProperty* delayed_weak_properties_; | 371 RawWeakProperty* delayed_weak_properties_; |
291 | 372 |
292 int64_t gc_time_micros_; | 373 int64_t gc_time_micros_; |
293 intptr_t collections_; | 374 intptr_t collections_; |
294 static const int kStatsHistoryCapacity = 2; | 375 static const int kStatsHistoryCapacity = 2; |
295 RingBuffer<ScavengeStats, kStatsHistoryCapacity> stats_history_; | 376 RingBuffer<ScavengeStats, kStatsHistoryCapacity> stats_history_; |
296 | 377 |
297 // The total size of external data associated with objects in this scavenger. | 378 // The total size of external data associated with objects in this scavenger. |
298 intptr_t external_size_; | 379 intptr_t external_size_; |
299 | 380 |
381 Mutex* space_lock_; | |
rmacnak 2017/07/05 17:39:53: Add a comment describing what this lock protects.
danunez 2017/07/05 18:12:56: I will remove the lock for now. It has no purpose…
382 | |
300 friend class ScavengerVisitor; | 383 friend class ScavengerVisitor; |
301 friend class ScavengerWeakVisitor; | 384 friend class ScavengerWeakVisitor; |
302 | 385 |
303 DISALLOW_COPY_AND_ASSIGN(Scavenger); | 386 DISALLOW_COPY_AND_ASSIGN(Scavenger); |
304 }; | 387 }; |
305 | 388 |
306 } // namespace dart | 389 } // namespace dart |
307 | 390 |
308 #endif // RUNTIME_VM_SCAVENGER_H_ | 391 #endif // RUNTIME_VM_SCAVENGER_H_ |