Chromium Code Reviews

Side by Side Diff: runtime/vm/scavenger.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
OLD | NEW
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/scavenger.h" 5 #include "vm/scavenger.h"
6 6
7 #include "vm/dart.h" 7 #include "vm/dart.h"
8 #include "vm/dart_api_state.h" 8 #include "vm/dart_api_state.h"
9 #include "vm/isolate.h" 9 #include "vm/isolate.h"
10 #include "vm/lockers.h" 10 #include "vm/lockers.h"
11 #include "vm/object.h" 11 #include "vm/object.h"
12 #include "vm/object_id_ring.h"
12 #include "vm/object_set.h" 13 #include "vm/object_set.h"
13 #include "vm/object_id_ring.h"
14 #include "vm/safepoint.h" 14 #include "vm/safepoint.h"
15 #include "vm/stack_frame.h" 15 #include "vm/stack_frame.h"
16 #include "vm/store_buffer.h" 16 #include "vm/store_buffer.h"
17 #include "vm/thread_registry.h" 17 #include "vm/thread_registry.h"
18 #include "vm/timeline.h" 18 #include "vm/timeline.h"
19 #include "vm/verifier.h" 19 #include "vm/verifier.h"
20 #include "vm/visitor.h" 20 #include "vm/visitor.h"
21 #include "vm/weak_table.h" 21 #include "vm/weak_table.h"
22 22
23 namespace dart { 23 namespace dart {
(...skipping 11 matching lines...)
35 35
36 // Scavenger uses RawObject::kMarkBit to distinguish forwarded and non-forwarded 36 // Scavenger uses RawObject::kMarkBit to distinguish forwarded and non-forwarded
37 // objects. The kMarkBit does not intersect with the target address because of 37 // objects. The kMarkBit does not intersect with the target address because of
38 // object alignment. 38 // object alignment.
39 enum { 39 enum {
40 kForwardingMask = 1 << RawObject::kMarkBit, 40 kForwardingMask = 1 << RawObject::kMarkBit,
41 kNotForwarded = 0, 41 kNotForwarded = 0,
42 kForwarded = kForwardingMask, 42 kForwarded = kForwardingMask,
43 }; 43 };
44 44
45
46 static inline bool IsForwarding(uword header) { 45 static inline bool IsForwarding(uword header) {
47 uword bits = header & kForwardingMask; 46 uword bits = header & kForwardingMask;
48 ASSERT((bits == kNotForwarded) || (bits == kForwarded)); 47 ASSERT((bits == kNotForwarded) || (bits == kForwarded));
49 return bits == kForwarded; 48 return bits == kForwarded;
50 } 49 }
51 50
52
53 static inline uword ForwardedAddr(uword header) { 51 static inline uword ForwardedAddr(uword header) {
54 ASSERT(IsForwarding(header)); 52 ASSERT(IsForwarding(header));
55 return header & ~kForwardingMask; 53 return header & ~kForwardingMask;
56 } 54 }
57 55
58
59 static inline void ForwardTo(uword original, uword target) { 56 static inline void ForwardTo(uword original, uword target) {
60 // Make sure forwarding can be encoded. 57 // Make sure forwarding can be encoded.
61 ASSERT((target & kForwardingMask) == 0); 58 ASSERT((target & kForwardingMask) == 0);
62 *reinterpret_cast<uword*>(original) = target | kForwarded; 59 *reinterpret_cast<uword*>(original) = target | kForwarded;
63 } 60 }
64 61
65
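Note on the forwarding scheme above: word alignment guarantees the mark bit is always zero in a real target address, so a single header word can hold either a normal header or a tagged forwarding pointer. A minimal standalone round-trip sketch (plain C++; the bit position here is assumed for illustration, where the VM uses RawObject::kMarkBit):

    // Standalone sketch of the forwarding encoding; kMarkBit's position
    // is assumed here, not taken from the VM.
    #include <cassert>
    #include <cstdint>

    typedef uintptr_t uword;

    enum : uword {
      kMarkBit = 0,  // assumed bit position for illustration
      kForwardingMask = uword(1) << kMarkBit,
      kForwarded = kForwardingMask,
    };

    int main() {
      uword header = 0;
      uword target = 0x1000;  // word-aligned, so the mark bit is free
      assert((target & kForwardingMask) == 0);           // ForwardTo's precondition
      header = target | kForwarded;                      // ForwardTo
      assert((header & kForwardingMask) == kForwarded);  // IsForwarding
      assert((header & ~kForwardingMask) == target);     // ForwardedAddr
      return 0;
    }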
66 class ScavengerVisitor : public ObjectPointerVisitor { 62 class ScavengerVisitor : public ObjectPointerVisitor {
67 public: 63 public:
68 explicit ScavengerVisitor(Isolate* isolate, 64 explicit ScavengerVisitor(Isolate* isolate,
69 Scavenger* scavenger, 65 Scavenger* scavenger,
70 SemiSpace* from) 66 SemiSpace* from)
71 : ObjectPointerVisitor(isolate), 67 : ObjectPointerVisitor(isolate),
72 thread_(Thread::Current()), 68 thread_(Thread::Current()),
73 scavenger_(scavenger), 69 scavenger_(scavenger),
74 from_(from), 70 from_(from),
75 heap_(scavenger->heap_), 71 heap_(scavenger->heap_),
(...skipping 110 matching lines...)
186 PageSpace* page_space_; 182 PageSpace* page_space_;
187 RawWeakProperty* delayed_weak_properties_; 183 RawWeakProperty* delayed_weak_properties_;
188 intptr_t bytes_promoted_; 184 intptr_t bytes_promoted_;
189 RawObject* visiting_old_object_; 185 RawObject* visiting_old_object_;
190 186
191 friend class Scavenger; 187 friend class Scavenger;
192 188
193 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor); 189 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitor);
194 }; 190 };
195 191
196
197 class ScavengerWeakVisitor : public HandleVisitor { 192 class ScavengerWeakVisitor : public HandleVisitor {
198 public: 193 public:
199 ScavengerWeakVisitor(Thread* thread, Scavenger* scavenger) 194 ScavengerWeakVisitor(Thread* thread, Scavenger* scavenger)
200 : HandleVisitor(thread), scavenger_(scavenger) { 195 : HandleVisitor(thread), scavenger_(scavenger) {
201 ASSERT(scavenger->heap_->isolate() == thread->isolate()); 196 ASSERT(scavenger->heap_->isolate() == thread->isolate());
202 } 197 }
203 198
204 void VisitHandle(uword addr) { 199 void VisitHandle(uword addr) {
205 FinalizablePersistentHandle* handle = 200 FinalizablePersistentHandle* handle =
206 reinterpret_cast<FinalizablePersistentHandle*>(addr); 201 reinterpret_cast<FinalizablePersistentHandle*>(addr);
207 RawObject** p = handle->raw_addr(); 202 RawObject** p = handle->raw_addr();
208 if (scavenger_->IsUnreachable(p)) { 203 if (scavenger_->IsUnreachable(p)) {
209 handle->UpdateUnreachable(thread()->isolate()); 204 handle->UpdateUnreachable(thread()->isolate());
210 } else { 205 } else {
211 handle->UpdateRelocated(thread()->isolate()); 206 handle->UpdateRelocated(thread()->isolate());
212 } 207 }
213 } 208 }
214 209
215 private: 210 private:
216 Scavenger* scavenger_; 211 Scavenger* scavenger_;
217 212
218 DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor); 213 DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor);
219 }; 214 };
220 215
221
222 // Visitor used to verify that all old->new references have been added to the 216 // Visitor used to verify that all old->new references have been added to the
223 // StoreBuffers. 217 // StoreBuffers.
224 class VerifyStoreBufferPointerVisitor : public ObjectPointerVisitor { 218 class VerifyStoreBufferPointerVisitor : public ObjectPointerVisitor {
225 public: 219 public:
226 VerifyStoreBufferPointerVisitor(Isolate* isolate, const SemiSpace* to) 220 VerifyStoreBufferPointerVisitor(Isolate* isolate, const SemiSpace* to)
227 : ObjectPointerVisitor(isolate), to_(to) {} 221 : ObjectPointerVisitor(isolate), to_(to) {}
228 222
229 void VisitPointers(RawObject** first, RawObject** last) { 223 void VisitPointers(RawObject** first, RawObject** last) {
230 for (RawObject** current = first; current <= last; current++) { 224 for (RawObject** current = first; current <= last; current++) {
231 RawObject* obj = *current; 225 RawObject* obj = *current;
232 if (obj->IsHeapObject() && obj->IsNewObject()) { 226 if (obj->IsHeapObject() && obj->IsNewObject()) {
233 ASSERT(to_->Contains(RawObject::ToAddr(obj))); 227 ASSERT(to_->Contains(RawObject::ToAddr(obj)));
234 } 228 }
235 } 229 }
236 } 230 }
237 231
238 private: 232 private:
239 const SemiSpace* to_; 233 const SemiSpace* to_;
240 234
241 DISALLOW_COPY_AND_ASSIGN(VerifyStoreBufferPointerVisitor); 235 DISALLOW_COPY_AND_ASSIGN(VerifyStoreBufferPointerVisitor);
242 }; 236 };
243 237
244
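Background for VerifyStoreBufferPointerVisitor: the store buffer is the scavenger's remembered set. Every old-to-new pointer must be recorded when it is written, so that a scavenge can treat those slots as roots instead of scanning all of old space; the visitor asserts that invariant by checking that each new-space object reachable from old space ended up in to-space. A minimal write-barrier sketch (simplified, hypothetical types; not the VM's API):

    #include <unordered_set>

    // Illustrative stand-ins: an object knows which space it lives in
    // and has a single pointer field.
    struct Object {
      bool in_new_space;
      Object* field;
    };

    static std::unordered_set<Object*> store_buffer;  // remembered set

    void StorePointer(Object* holder, Object* value) {
      holder->field = value;
      // Write barrier: remember old-space objects that now point into
      // new space, so the scavenger can find this slot later.
      if (!holder->in_new_space && value != nullptr && value->in_new_space) {
        store_buffer.insert(holder);
      }
    }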
245 SemiSpace::SemiSpace(VirtualMemory* reserved) 238 SemiSpace::SemiSpace(VirtualMemory* reserved)
246 : reserved_(reserved), region_(NULL, 0) { 239 : reserved_(reserved), region_(NULL, 0) {
247 if (reserved != NULL) { 240 if (reserved != NULL) {
248 region_ = MemoryRegion(reserved_->address(), reserved_->size()); 241 region_ = MemoryRegion(reserved_->address(), reserved_->size());
249 } 242 }
250 } 243 }
251 244
252
253 SemiSpace::~SemiSpace() { 245 SemiSpace::~SemiSpace() {
254 if (reserved_ != NULL) { 246 if (reserved_ != NULL) {
255 #if defined(DEBUG) 247 #if defined(DEBUG)
256 memset(reserved_->address(), Heap::kZapByte, size_in_words() 248 memset(reserved_->address(), Heap::kZapByte,
257 << kWordSizeLog2); 249 size_in_words() << kWordSizeLog2);
258 #endif // defined(DEBUG) 250 #endif // defined(DEBUG)
259 delete reserved_; 251 delete reserved_;
260 } 252 }
261 } 253 }
262 254
263
264 Mutex* SemiSpace::mutex_ = NULL; 255 Mutex* SemiSpace::mutex_ = NULL;
265 SemiSpace* SemiSpace::cache_ = NULL; 256 SemiSpace* SemiSpace::cache_ = NULL;
266 257
267
268 void SemiSpace::InitOnce() { 258 void SemiSpace::InitOnce() {
269 ASSERT(mutex_ == NULL); 259 ASSERT(mutex_ == NULL);
270 mutex_ = new Mutex(); 260 mutex_ = new Mutex();
271 ASSERT(mutex_ != NULL); 261 ASSERT(mutex_ != NULL);
272 } 262 }
273 263
274
275 SemiSpace* SemiSpace::New(intptr_t size_in_words, const char* name) { 264 SemiSpace* SemiSpace::New(intptr_t size_in_words, const char* name) {
276 { 265 {
277 MutexLocker locker(mutex_); 266 MutexLocker locker(mutex_);
278 // TODO(koda): Cache one entry per size. 267 // TODO(koda): Cache one entry per size.
279 if (cache_ != NULL && cache_->size_in_words() == size_in_words) { 268 if (cache_ != NULL && cache_->size_in_words() == size_in_words) {
280 SemiSpace* result = cache_; 269 SemiSpace* result = cache_;
281 cache_ = NULL; 270 cache_ = NULL;
282 return result; 271 return result;
283 } 272 }
284 } 273 }
285 if (size_in_words == 0) { 274 if (size_in_words == 0) {
286 return new SemiSpace(NULL); 275 return new SemiSpace(NULL);
287 } else { 276 } else {
288 intptr_t size_in_bytes = size_in_words << kWordSizeLog2; 277 intptr_t size_in_bytes = size_in_words << kWordSizeLog2;
289 VirtualMemory* reserved = VirtualMemory::Reserve(size_in_bytes); 278 VirtualMemory* reserved = VirtualMemory::Reserve(size_in_bytes);
290 const bool kExecutable = false; 279 const bool kExecutable = false;
291 if ((reserved == NULL) || !reserved->Commit(kExecutable, name)) { 280 if ((reserved == NULL) || !reserved->Commit(kExecutable, name)) {
292 // TODO(koda): If cache_ is not empty, we could try to delete it. 281 // TODO(koda): If cache_ is not empty, we could try to delete it.
293 delete reserved; 282 delete reserved;
294 return NULL; 283 return NULL;
295 } 284 }
296 #if defined(DEBUG) 285 #if defined(DEBUG)
297 memset(reserved->address(), Heap::kZapByte, size_in_bytes); 286 memset(reserved->address(), Heap::kZapByte, size_in_bytes);
298 #endif // defined(DEBUG) 287 #endif // defined(DEBUG)
299 return new SemiSpace(reserved); 288 return new SemiSpace(reserved);
300 } 289 }
301 } 290 }
302 291
303
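The mutex_/cache_ pair above is a single-slot free list: Delete() parks the space instead of freeing it, and New() hands it back when the requested size matches, saving a reserve/commit round-trip between back-to-back scavenges. A simplified sketch of the pattern (std::mutex and the Space type are illustrative stand-ins):

    #include <cstddef>
    #include <mutex>

    struct Space {
      size_t size_in_words;
    };

    static std::mutex cache_mutex;
    static Space* cache = nullptr;  // at most one space is kept around

    Space* NewSpace(size_t size_in_words) {
      {
        std::lock_guard<std::mutex> lock(cache_mutex);
        if (cache != nullptr && cache->size_in_words == size_in_words) {
          Space* result = cache;  // reuse the parked space
          cache = nullptr;
          return result;
        }
      }
      return new Space{size_in_words};  // cache miss: allocate fresh
    }

    void DeleteSpace(Space* space) {
      Space* old_cache = nullptr;
      {
        std::lock_guard<std::mutex> lock(cache_mutex);
        old_cache = cache;  // displace whatever was parked before
        cache = space;
      }
      delete old_cache;  // free outside the lock
    }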
304 void SemiSpace::Delete() { 292 void SemiSpace::Delete() {
305 #ifdef DEBUG 293 #ifdef DEBUG
306 if (reserved_ != NULL) { 294 if (reserved_ != NULL) {
307 const intptr_t size_in_bytes = size_in_words() << kWordSizeLog2; 295 const intptr_t size_in_bytes = size_in_words() << kWordSizeLog2;
308 memset(reserved_->address(), Heap::kZapByte, size_in_bytes); 296 memset(reserved_->address(), Heap::kZapByte, size_in_bytes);
309 } 297 }
310 #endif 298 #endif
311 SemiSpace* old_cache = NULL; 299 SemiSpace* old_cache = NULL;
312 { 300 {
313 MutexLocker locker(mutex_); 301 MutexLocker locker(mutex_);
314 old_cache = cache_; 302 old_cache = cache_;
315 cache_ = this; 303 cache_ = this;
316 } 304 }
317 delete old_cache; 305 delete old_cache;
318 } 306 }
319 307
320
321 void SemiSpace::WriteProtect(bool read_only) { 308 void SemiSpace::WriteProtect(bool read_only) {
322 if (reserved_ != NULL) { 309 if (reserved_ != NULL) {
323 bool success = reserved_->Protect(read_only ? VirtualMemory::kReadOnly 310 bool success = reserved_->Protect(read_only ? VirtualMemory::kReadOnly
324 : VirtualMemory::kReadWrite); 311 : VirtualMemory::kReadWrite);
325 ASSERT(success); 312 ASSERT(success);
326 } 313 }
327 } 314 }
328 315
329
330 Scavenger::Scavenger(Heap* heap, 316 Scavenger::Scavenger(Heap* heap,
331 intptr_t max_semi_capacity_in_words, 317 intptr_t max_semi_capacity_in_words,
332 uword object_alignment) 318 uword object_alignment)
333 : heap_(heap), 319 : heap_(heap),
334 max_semi_capacity_in_words_(max_semi_capacity_in_words), 320 max_semi_capacity_in_words_(max_semi_capacity_in_words),
335 object_alignment_(object_alignment), 321 object_alignment_(object_alignment),
336 scavenging_(false), 322 scavenging_(false),
337 delayed_weak_properties_(NULL), 323 delayed_weak_properties_(NULL),
338 gc_time_micros_(0), 324 gc_time_micros_(0),
339 collections_(0), 325 collections_(0),
(...skipping 19 matching lines...)
359 top_ = FirstObjectStart(); 345 top_ = FirstObjectStart();
360 resolved_top_ = top_; 346 resolved_top_ = top_;
361 end_ = to_->end(); 347 end_ = to_->end();
362 348
363 survivor_end_ = FirstObjectStart(); 349 survivor_end_ = FirstObjectStart();
364 350
365 UpdateMaxHeapCapacity(); 351 UpdateMaxHeapCapacity();
366 UpdateMaxHeapUsage(); 352 UpdateMaxHeapUsage();
367 } 353 }
368 354
369
370 Scavenger::~Scavenger() { 355 Scavenger::~Scavenger() {
371 ASSERT(!scavenging_); 356 ASSERT(!scavenging_);
372 to_->Delete(); 357 to_->Delete();
373 } 358 }
374 359
375
376 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { 360 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const {
377 if (stats_history_.Size() == 0) { 361 if (stats_history_.Size() == 0) {
378 return old_size_in_words; 362 return old_size_in_words;
379 } 363 }
380 double garbage = stats_history_.Get(0).GarbageFraction(); 364 double garbage = stats_history_.Get(0).GarbageFraction();
381 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { 365 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) {
382 return Utils::Minimum(max_semi_capacity_in_words_, 366 return Utils::Minimum(max_semi_capacity_in_words_,
383 old_size_in_words * FLAG_new_gen_growth_factor); 367 old_size_in_words * FLAG_new_gen_growth_factor);
384 } else { 368 } else {
385 return old_size_in_words; 369 return old_size_in_words;
386 } 370 }
387 } 371 }
388 372
389
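NewSizeInWords grows the semispace only while scavenges are not paying for themselves: a low garbage fraction means most objects survive, so a larger space gives them more time to die. A sketch with assumed flag values (FLAG_new_gen_garbage_threshold = 90 and FLAG_new_gen_growth_factor = 2 here; the real defaults may differ):

    #include <algorithm>
    #include <cstdint>

    // Growth policy sketch; flag values are assumed for illustration.
    intptr_t NewSizeInWords(intptr_t old_size_in_words,
                            double garbage_fraction,
                            intptr_t max_semi_capacity_in_words) {
      const double kGarbageThreshold = 90 / 100.0;  // assumed flag value
      const intptr_t kGrowthFactor = 2;             // assumed flag value
      if (garbage_fraction < kGarbageThreshold) {
        // Mostly survivors: grow (capped) so the next cycle has headroom.
        return std::min(max_semi_capacity_in_words,
                        old_size_in_words * kGrowthFactor);
      }
      return old_size_in_words;  // mostly garbage: current size works
    }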
390 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { 373 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) {
391 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { 374 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) {
392 (isolate->gc_prologue_callback())(); 375 (isolate->gc_prologue_callback())();
393 } 376 }
394 isolate->PrepareForGC(); 377 isolate->PrepareForGC();
395 // Flip the two semi-spaces so that to_ is always the space for allocating 378 // Flip the two semi-spaces so that to_ is always the space for allocating
396 // objects. 379 // objects.
397 SemiSpace* from = to_; 380 SemiSpace* from = to_;
398 381
399 const intptr_t kVmNameSize = 128; 382 const intptr_t kVmNameSize = 128;
400 char vm_name[kVmNameSize]; 383 char vm_name[kVmNameSize];
401 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); 384 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize);
402 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); 385 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name);
403 if (to_ == NULL) { 386 if (to_ == NULL) {
404 // TODO(koda): We could try to recover (collect old space, wait for another 387 // TODO(koda): We could try to recover (collect old space, wait for another
405 // isolate to finish scavenge, etc.). 388 // isolate to finish scavenge, etc.).
406 OUT_OF_MEMORY(); 389 OUT_OF_MEMORY();
407 } 390 }
408 UpdateMaxHeapCapacity(); 391 UpdateMaxHeapCapacity();
409 top_ = FirstObjectStart(); 392 top_ = FirstObjectStart();
410 resolved_top_ = top_; 393 resolved_top_ = top_;
411 end_ = to_->end(); 394 end_ = to_->end();
412 return from; 395 return from;
413 } 396 }
414 397
415
416 void Scavenger::Epilogue(Isolate* isolate, 398 void Scavenger::Epilogue(Isolate* isolate,
417 SemiSpace* from, 399 SemiSpace* from,
418 bool invoke_api_callbacks) { 400 bool invoke_api_callbacks) {
419 // All objects in the to space have been copied from the from space at this 401 // All objects in the to space have been copied from the from space at this
420 // moment. 402 // moment.
421 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); 403 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
422 if (stats_history_.Size() >= 2) { 404 if (stats_history_.Size() >= 2) {
423 // Previous scavenge is only given half as much weight. 405 // Previous scavenge is only given half as much weight.
424 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); 406 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction();
425 avg_frac /= 1.0 + 0.5; // Normalize. 407 avg_frac /= 1.0 + 0.5; // Normalize.
(...skipping 22 matching lines...)
448 from->Delete(); 430 from->Delete();
449 UpdateMaxHeapUsage(); 431 UpdateMaxHeapUsage();
450 if (heap_ != NULL) { 432 if (heap_ != NULL) {
451 heap_->UpdateGlobalMaxUsed(); 433 heap_->UpdateGlobalMaxUsed();
452 } 434 }
453 if (invoke_api_callbacks && (isolate->gc_epilogue_callback() != NULL)) { 435 if (invoke_api_callbacks && (isolate->gc_epilogue_callback() != NULL)) {
454 (isolate->gc_epilogue_callback())(); 436 (isolate->gc_epilogue_callback())();
455 } 437 }
456 } 438 }
457 439
458
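The blend above is a two-point decaying average: the latest promotion-candidate success fraction gets full weight and the previous one half weight, normalized by 1.5. With assumed fractions of 0.8 (latest) and 0.2 (previous), avg_frac = (0.8 + 0.5 * 0.2) / 1.5 = 0.6, versus 0.5 for a plain mean, so the most recent scavenge dominates the early-growth heuristic.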
459 void Scavenger::IterateStoreBuffers(Isolate* isolate, 440 void Scavenger::IterateStoreBuffers(Isolate* isolate,
460 ScavengerVisitor* visitor) { 441 ScavengerVisitor* visitor) {
461 // Iterating through the store buffers. 442 // Iterating through the store buffers.
462 // Grab the deduplication sets out of the isolate's consolidated store buffer. 443 // Grab the deduplication sets out of the isolate's consolidated store buffer.
463 StoreBufferBlock* pending = isolate->store_buffer()->Blocks(); 444 StoreBufferBlock* pending = isolate->store_buffer()->Blocks();
464 intptr_t total_count = 0; 445 intptr_t total_count = 0;
465 while (pending != NULL) { 446 while (pending != NULL) {
466 StoreBufferBlock* next = pending->next(); 447 StoreBufferBlock* next = pending->next();
467 // Generated code appends to store buffers; tell MemorySanitizer. 448 // Generated code appends to store buffers; tell MemorySanitizer.
468 MSAN_UNPOISON(pending, sizeof(*pending)); 449 MSAN_UNPOISON(pending, sizeof(*pending));
(...skipping 16 matching lines...)
485 isolate->store_buffer()->PushBlock(pending, StoreBuffer::kIgnoreThreshold); 466 isolate->store_buffer()->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
486 pending = next; 467 pending = next;
487 } 468 }
488 heap_->RecordData(kStoreBufferEntries, total_count); 469 heap_->RecordData(kStoreBufferEntries, total_count);
489 heap_->RecordData(kDataUnused1, 0); 470 heap_->RecordData(kDataUnused1, 0);
490 heap_->RecordData(kDataUnused2, 0); 471 heap_->RecordData(kDataUnused2, 0);
491 // Done iterating through old objects remembered in the store buffers. 472 // Done iterating through old objects remembered in the store buffers.
492 visitor->VisitingOldObject(NULL); 473 visitor->VisitingOldObject(NULL);
493 } 474 }
494 475
495
496 void Scavenger::IterateObjectIdTable(Isolate* isolate, 476 void Scavenger::IterateObjectIdTable(Isolate* isolate,
497 ScavengerVisitor* visitor) { 477 ScavengerVisitor* visitor) {
498 #ifndef PRODUCT 478 #ifndef PRODUCT
499 if (!FLAG_support_service) { 479 if (!FLAG_support_service) {
500 return; 480 return;
501 } 481 }
502 ObjectIdRing* ring = isolate->object_id_ring(); 482 ObjectIdRing* ring = isolate->object_id_ring();
503 if (ring == NULL) { 483 if (ring == NULL) {
504 // --gc_at_alloc can get us here before the ring has been initialized. 484 // --gc_at_alloc can get us here before the ring has been initialized.
505 ASSERT(FLAG_gc_at_alloc); 485 ASSERT(FLAG_gc_at_alloc);
506 return; 486 return;
507 } 487 }
508 ring->VisitPointers(visitor); 488 ring->VisitPointers(visitor);
509 #endif // !PRODUCT 489 #endif // !PRODUCT
510 } 490 }
511 491
512
513 void Scavenger::IterateRoots(Isolate* isolate, ScavengerVisitor* visitor) { 492 void Scavenger::IterateRoots(Isolate* isolate, ScavengerVisitor* visitor) {
514 int64_t start = OS::GetCurrentMonotonicMicros(); 493 int64_t start = OS::GetCurrentMonotonicMicros();
515 isolate->VisitObjectPointers(visitor, 494 isolate->VisitObjectPointers(visitor,
516 StackFrameIterator::kDontValidateFrames); 495 StackFrameIterator::kDontValidateFrames);
517 int64_t middle = OS::GetCurrentMonotonicMicros(); 496 int64_t middle = OS::GetCurrentMonotonicMicros();
518 IterateStoreBuffers(isolate, visitor); 497 IterateStoreBuffers(isolate, visitor);
519 IterateObjectIdTable(isolate, visitor); 498 IterateObjectIdTable(isolate, visitor);
520 int64_t end = OS::GetCurrentMonotonicMicros(); 499 int64_t end = OS::GetCurrentMonotonicMicros();
521 heap_->RecordData(kToKBAfterStoreBuffer, RoundWordsToKB(UsedInWords())); 500 heap_->RecordData(kToKBAfterStoreBuffer, RoundWordsToKB(UsedInWords()));
522 heap_->RecordTime(kVisitIsolateRoots, middle - start); 501 heap_->RecordTime(kVisitIsolateRoots, middle - start);
523 heap_->RecordTime(kIterateStoreBuffers, end - middle); 502 heap_->RecordTime(kIterateStoreBuffers, end - middle);
524 heap_->RecordTime(kDummyScavengeTime, 0); 503 heap_->RecordTime(kDummyScavengeTime, 0);
525 } 504 }
526 505
527
528 bool Scavenger::IsUnreachable(RawObject** p) { 506 bool Scavenger::IsUnreachable(RawObject** p) {
529 RawObject* raw_obj = *p; 507 RawObject* raw_obj = *p;
530 if (!raw_obj->IsHeapObject()) { 508 if (!raw_obj->IsHeapObject()) {
531 return false; 509 return false;
532 } 510 }
533 if (!raw_obj->IsNewObject()) { 511 if (!raw_obj->IsNewObject()) {
534 return false; 512 return false;
535 } 513 }
536 uword raw_addr = RawObject::ToAddr(raw_obj); 514 uword raw_addr = RawObject::ToAddr(raw_obj);
537 if (to_->Contains(raw_addr)) { 515 if (to_->Contains(raw_addr)) {
538 return false; 516 return false;
539 } 517 }
540 uword header = *reinterpret_cast<uword*>(raw_addr); 518 uword header = *reinterpret_cast<uword*>(raw_addr);
541 if (IsForwarding(header)) { 519 if (IsForwarding(header)) {
542 uword new_addr = ForwardedAddr(header); 520 uword new_addr = ForwardedAddr(header);
543 *p = RawObject::FromAddr(new_addr); 521 *p = RawObject::FromAddr(new_addr);
544 return false; 522 return false;
545 } 523 }
546 return true; 524 return true;
547 } 525 }
548 526
549
550 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { 527 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) {
551 isolate->VisitWeakPersistentHandles(visitor); 528 isolate->VisitWeakPersistentHandles(visitor);
552 } 529 }
553 530
554
555 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { 531 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) {
556 // Iterate until all work has been drained. 532 // Iterate until all work has been drained.
557 while ((resolved_top_ < top_) || PromotedStackHasMore()) { 533 while ((resolved_top_ < top_) || PromotedStackHasMore()) {
558 while (resolved_top_ < top_) { 534 while (resolved_top_ < top_) {
559 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); 535 RawObject* raw_obj = RawObject::FromAddr(resolved_top_);
560 intptr_t class_id = raw_obj->GetClassId(); 536 intptr_t class_id = raw_obj->GetClassId();
561 if (class_id != kWeakPropertyCid) { 537 if (class_id != kWeakPropertyCid) {
562 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor); 538 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor);
563 } else { 539 } else {
564 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); 540 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj);
(...skipping 41 matching lines...)
606 } else { 582 } else {
607 EnqueueWeakProperty(cur_weak); 583 EnqueueWeakProperty(cur_weak);
608 } 584 }
609 // Advance to next weak property in the queue. 585 // Advance to next weak property in the queue.
610 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); 586 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak);
611 } 587 }
612 } 588 }
613 } 589 }
614 } 590 }
615 591
616
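ProcessToSpace is a Cheney-style scan: resolved_top_ chases top_, and each object copied while visiting pointers bumps top_, so the region between the two pointers acts as the implicit work queue (with the promoted stack covering objects copied to old space). A compact standalone sketch of the drain using table indices instead of addresses (all names are illustrative):

    #include <cstddef>
    #include <vector>

    // Objects are rows in a table; fields hold indices of referenced
    // objects, or -1 for null. 'forwarded' plays the forwarding header.
    struct Obj {
      std::vector<int> fields;
      int forwarded = -1;  // index in to-space once copied
    };

    // Evacuate one object and return its to-space index (ForwardedAddr).
    int Copy(std::vector<Obj>& from, std::vector<Obj>& to, int idx) {
      if (from[idx].forwarded < 0) {
        to.push_back(from[idx]);
        from[idx].forwarded = static_cast<int>(to.size()) - 1;
      }
      return from[idx].forwarded;
    }

    void Scavenge(std::vector<Obj>& from, std::vector<Obj>& to,
                  std::vector<int>& roots) {
      for (int& r : roots) r = Copy(from, to, r);  // copy the roots first
      size_t resolved = 0;  // resolved_top_; to.size() plays top_
      while (resolved < to.size()) {
        // Index-based loop: Copy() may grow 'to', so no references are
        // held across the call.
        for (size_t i = 0; i < to[resolved].fields.size(); i++) {
          int f = to[resolved].fields[i];
          if (f >= 0) to[resolved].fields[i] = Copy(from, to, f);
        }
        resolved++;  // this object's pointers are now resolved
      }
    }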
617 void Scavenger::UpdateMaxHeapCapacity() { 592 void Scavenger::UpdateMaxHeapCapacity() {
618 if (heap_ == NULL) { 593 if (heap_ == NULL) {
619 // Some unit tests. 594 // Some unit tests.
620 return; 595 return;
621 } 596 }
622 ASSERT(to_ != NULL); 597 ASSERT(to_ != NULL);
623 ASSERT(heap_ != NULL); 598 ASSERT(heap_ != NULL);
624 Isolate* isolate = heap_->isolate(); 599 Isolate* isolate = heap_->isolate();
625 ASSERT(isolate != NULL); 600 ASSERT(isolate != NULL);
626 isolate->GetHeapNewCapacityMaxMetric()->SetValue(to_->size_in_words() * 601 isolate->GetHeapNewCapacityMaxMetric()->SetValue(to_->size_in_words() *
627 kWordSize); 602 kWordSize);
628 } 603 }
629 604
630
631 void Scavenger::UpdateMaxHeapUsage() { 605 void Scavenger::UpdateMaxHeapUsage() {
632 if (heap_ == NULL) { 606 if (heap_ == NULL) {
633 // Some unit tests. 607 // Some unit tests.
634 return; 608 return;
635 } 609 }
636 ASSERT(to_ != NULL); 610 ASSERT(to_ != NULL);
637 ASSERT(heap_ != NULL); 611 ASSERT(heap_ != NULL);
638 Isolate* isolate = heap_->isolate(); 612 Isolate* isolate = heap_->isolate();
639 ASSERT(isolate != NULL); 613 ASSERT(isolate != NULL);
640 isolate->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize); 614 isolate->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
641 } 615 }
642 616
643
644 void Scavenger::EnqueueWeakProperty(RawWeakProperty* raw_weak) { 617 void Scavenger::EnqueueWeakProperty(RawWeakProperty* raw_weak) {
645 ASSERT(raw_weak->IsHeapObject()); 618 ASSERT(raw_weak->IsHeapObject());
646 ASSERT(raw_weak->IsNewObject()); 619 ASSERT(raw_weak->IsNewObject());
647 ASSERT(raw_weak->IsWeakProperty()); 620 ASSERT(raw_weak->IsWeakProperty());
648 #if defined(DEBUG) 621 #if defined(DEBUG)
649 uword raw_addr = RawObject::ToAddr(raw_weak); 622 uword raw_addr = RawObject::ToAddr(raw_weak);
650 uword header = *reinterpret_cast<uword*>(raw_addr); 623 uword header = *reinterpret_cast<uword*>(raw_addr);
651 ASSERT(!IsForwarding(header)); 624 ASSERT(!IsForwarding(header));
652 #endif // defined(DEBUG) 625 #endif // defined(DEBUG)
653 ASSERT(raw_weak->ptr()->next_ == 0); 626 ASSERT(raw_weak->ptr()->next_ == 0);
654 raw_weak->ptr()->next_ = reinterpret_cast<uword>(delayed_weak_properties_); 627 raw_weak->ptr()->next_ = reinterpret_cast<uword>(delayed_weak_properties_);
655 delayed_weak_properties_ = raw_weak; 628 delayed_weak_properties_ = raw_weak;
656 } 629 }
657 630
658
659 uword Scavenger::ProcessWeakProperty(RawWeakProperty* raw_weak, 631 uword Scavenger::ProcessWeakProperty(RawWeakProperty* raw_weak,
660 ScavengerVisitor* visitor) { 632 ScavengerVisitor* visitor) {
661 // The fate of the weak property is determined by its key. 633 // The fate of the weak property is determined by its key.
662 RawObject* raw_key = raw_weak->ptr()->key_; 634 RawObject* raw_key = raw_weak->ptr()->key_;
663 if (raw_key->IsHeapObject() && raw_key->IsNewObject()) { 635 if (raw_key->IsHeapObject() && raw_key->IsNewObject()) {
664 uword raw_addr = RawObject::ToAddr(raw_key); 636 uword raw_addr = RawObject::ToAddr(raw_key);
665 uword header = *reinterpret_cast<uword*>(raw_addr); 637 uword header = *reinterpret_cast<uword*>(raw_addr);
666 if (!IsForwarding(header)) { 638 if (!IsForwarding(header)) {
667 // Key is white. Enqueue the weak property. 639 // Key is white. Enqueue the weak property.
668 EnqueueWeakProperty(raw_weak); 640 EnqueueWeakProperty(raw_weak);
669 return raw_weak->Size(); 641 return raw_weak->Size();
670 } 642 }
671 } 643 }
672 // Key is gray or black. Make the weak property black. 644 // Key is gray or black. Make the weak property black.
673 return raw_weak->VisitPointersNonvirtual(visitor); 645 return raw_weak->VisitPointersNonvirtual(visitor);
674 } 646 }
675 647
676
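A weak property is an ephemeron: its value is traced only if its key is live. In the comments above, 'white' means the key has no forwarding header yet (not evacuated) and 'gray or black' means it has been copied. White-keyed properties are parked on delayed_weak_properties_ and retried after every drain of the work queue, so a key that becomes reachable later still keeps its value; whatever is left in the queue once no more objects move is cleared by ProcessWeakReferences.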
677 void Scavenger::ProcessWeakReferences() { 648 void Scavenger::ProcessWeakReferences() {
678 // Rehash the weak tables now that we know which objects survive this cycle. 649 // Rehash the weak tables now that we know which objects survive this cycle.
679 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { 650 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
680 WeakTable* table = 651 WeakTable* table =
681 heap_->GetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel)); 652 heap_->GetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel));
682 heap_->SetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel), 653 heap_->SetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel),
683 WeakTable::NewFrom(table)); 654 WeakTable::NewFrom(table));
684 intptr_t size = table->size(); 655 intptr_t size = table->size();
685 for (intptr_t i = 0; i < size; i++) { 656 for (intptr_t i = 0; i < size; i++) {
686 if (table->IsValidEntryAt(i)) { 657 if (table->IsValidEntryAt(i)) {
(...skipping 35 matching lines...)
722 #endif // defined(DEBUG) 693 #endif // defined(DEBUG)
723 694
724 WeakProperty::Clear(cur_weak); 695 WeakProperty::Clear(cur_weak);
725 696
726 // Advance to next weak property in the queue. 697 // Advance to next weak property in the queue.
727 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); 698 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak);
728 } 699 }
729 } 700 }
730 } 701 }
731 702
732
733 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { 703 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
734 uword cur = FirstObjectStart(); 704 uword cur = FirstObjectStart();
735 while (cur < top_) { 705 while (cur < top_) {
736 RawObject* raw_obj = RawObject::FromAddr(cur); 706 RawObject* raw_obj = RawObject::FromAddr(cur);
737 cur += raw_obj->VisitPointers(visitor); 707 cur += raw_obj->VisitPointers(visitor);
738 } 708 }
739 } 709 }
740 710
741
742 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { 711 void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
743 uword cur = FirstObjectStart(); 712 uword cur = FirstObjectStart();
744 while (cur < top_) { 713 while (cur < top_) {
745 RawObject* raw_obj = RawObject::FromAddr(cur); 714 RawObject* raw_obj = RawObject::FromAddr(cur);
746 visitor->VisitObject(raw_obj); 715 visitor->VisitObject(raw_obj);
747 cur += raw_obj->Size(); 716 cur += raw_obj->Size();
748 } 717 }
749 } 718 }
750 719
751
752 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { 720 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const {
753 set->AddRegion(to_->start(), to_->end()); 721 set->AddRegion(to_->start(), to_->end());
754 } 722 }
755 723
756
757 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { 724 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
758 ASSERT(!scavenging_); 725 ASSERT(!scavenging_);
759 uword cur = FirstObjectStart(); 726 uword cur = FirstObjectStart();
760 if (visitor->VisitRange(cur, top_)) { 727 if (visitor->VisitRange(cur, top_)) {
761 while (cur < top_) { 728 while (cur < top_) {
762 RawObject* raw_obj = RawObject::FromAddr(cur); 729 RawObject* raw_obj = RawObject::FromAddr(cur);
763 uword next = cur + raw_obj->Size(); 730 uword next = cur + raw_obj->Size();
764 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { 731 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) {
765 return raw_obj; // Found object, return it. 732 return raw_obj; // Found object, return it.
766 } 733 }
767 cur = next; 734 cur = next;
768 } 735 }
769 ASSERT(cur == top_); 736 ASSERT(cur == top_);
770 } 737 }
771 return Object::null(); 738 return Object::null();
772 } 739 }
773 740
774
775 void Scavenger::Scavenge() { 741 void Scavenger::Scavenge() {
776 // TODO(cshapiro): Add a decision procedure for determining when the 742 // TODO(cshapiro): Add a decision procedure for determining when the
777 // the API callbacks should be invoked. 743 // the API callbacks should be invoked.
778 Scavenge(false); 744 Scavenge(false);
779 } 745 }
780 746
781
782 void Scavenger::Scavenge(bool invoke_api_callbacks) { 747 void Scavenger::Scavenge(bool invoke_api_callbacks) {
783 Isolate* isolate = heap_->isolate(); 748 Isolate* isolate = heap_->isolate();
784 // Ensure that all threads for this isolate are at a safepoint (either stopped 749 // Ensure that all threads for this isolate are at a safepoint (either stopped
785 // or in native code). If two threads are racing at this point, the loser 750 // or in native code). If two threads are racing at this point, the loser
786 // will continue with its scavenge after waiting for the winner to complete. 751 // will continue with its scavenge after waiting for the winner to complete.
787 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry 752 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
788 // logic to avoid needless collections. 753 // logic to avoid needless collections.
789 754
790 int64_t pre_safe_point = OS::GetCurrentMonotonicMicros(); 755 int64_t pre_safe_point = OS::GetCurrentMonotonicMicros();
791 756
(...skipping 58 matching lines...)
850 OS::PrintErr("Verifying after Scavenge..."); 815 OS::PrintErr("Verifying after Scavenge...");
851 heap_->Verify(kForbidMarked); 816 heap_->Verify(kForbidMarked);
852 OS::PrintErr(" done.\n"); 817 OS::PrintErr(" done.\n");
853 } 818 }
854 819
855 // Done scavenging. Reset the marker. 820 // Done scavenging. Reset the marker.
856 ASSERT(scavenging_); 821 ASSERT(scavenging_);
857 scavenging_ = false; 822 scavenging_ = false;
858 } 823 }
859 824
860
861 void Scavenger::WriteProtect(bool read_only) { 825 void Scavenger::WriteProtect(bool read_only) {
862 ASSERT(!scavenging_); 826 ASSERT(!scavenging_);
863 to_->WriteProtect(read_only); 827 to_->WriteProtect(read_only);
864 } 828 }
865 829
866
867 #ifndef PRODUCT 830 #ifndef PRODUCT
868 void Scavenger::PrintToJSONObject(JSONObject* object) const { 831 void Scavenger::PrintToJSONObject(JSONObject* object) const {
869 if (!FLAG_support_service) { 832 if (!FLAG_support_service) {
870 return; 833 return;
871 } 834 }
872 Isolate* isolate = Isolate::Current(); 835 Isolate* isolate = Isolate::Current();
873 ASSERT(isolate != NULL); 836 ASSERT(isolate != NULL);
874 JSONObject space(object, "new"); 837 JSONObject space(object, "new");
875 space.AddProperty("type", "HeapSpace"); 838 space.AddProperty("type", "HeapSpace");
876 space.AddProperty("name", "new"); 839 space.AddProperty("name", "new");
(...skipping 10 matching lines...)
887 } else { 850 } else {
888 space.AddProperty("avgCollectionPeriodMillis", 0.0); 851 space.AddProperty("avgCollectionPeriodMillis", 0.0);
889 } 852 }
890 space.AddProperty64("used", UsedInWords() * kWordSize); 853 space.AddProperty64("used", UsedInWords() * kWordSize);
891 space.AddProperty64("capacity", CapacityInWords() * kWordSize); 854 space.AddProperty64("capacity", CapacityInWords() * kWordSize);
892 space.AddProperty64("external", ExternalInWords() * kWordSize); 855 space.AddProperty64("external", ExternalInWords() * kWordSize);
893 space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros())); 856 space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros()));
894 } 857 }
895 #endif // !PRODUCT 858 #endif // !PRODUCT
896 859
897
898 void Scavenger::AllocateExternal(intptr_t size) { 860 void Scavenger::AllocateExternal(intptr_t size) {
899 ASSERT(size >= 0); 861 ASSERT(size >= 0);
900 external_size_ += size; 862 external_size_ += size;
901 } 863 }
902 864
903
904 void Scavenger::FreeExternal(intptr_t size) { 865 void Scavenger::FreeExternal(intptr_t size) {
905 ASSERT(size >= 0); 866 ASSERT(size >= 0);
906 external_size_ -= size; 867 external_size_ -= size;
907 ASSERT(external_size_ >= 0); 868 ASSERT(external_size_ >= 0);
908 } 869 }
909 870
910
911 void Scavenger::Evacuate() { 871 void Scavenger::Evacuate() {
912 // We need a safepoint here to prevent allocation right before or right after 872 // We need a safepoint here to prevent allocation right before or right after
913 // the scavenge. 873 // the scavenge.
914 // The former can introduce an object that we might fail to collect. 874 // The former can introduce an object that we might fail to collect.
915 // The latter means even if the scavenge promotes every object in the new 875 // The latter means even if the scavenge promotes every object in the new
916 // space, the new allocation means the space is not empty, 876 // space, the new allocation means the space is not empty,
917 // causing the assertion below to fail. 877 // causing the assertion below to fail.
918 SafepointOperationScope scope(Thread::Current()); 878 SafepointOperationScope scope(Thread::Current());
919 879
920 // Forces the next scavenge to promote all the objects in the new space. 880 // Forces the next scavenge to promote all the objects in the new space.
921 survivor_end_ = top_; 881 survivor_end_ = top_;
922 Scavenge(); 882 Scavenge();
923 883
924 // It is possible for objects to stay in the new space 884 // It is possible for objects to stay in the new space
925 // if the VM cannot create more pages for these objects. 885 // if the VM cannot create more pages for these objects.
926 ASSERT((UsedInWords() == 0) || failed_to_promote_); 886 ASSERT((UsedInWords() == 0) || failed_to_promote_);
927 } 887 }
928 888
929 } // namespace dart 889 } // namespace dart