Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 249 matching lines...) | |
| 260 // (mod mask + 1). | 260 // (mod mask + 1). |
| 261 int top_; | 261 int top_; |
| 262 int bottom_; | 262 int bottom_; |
| 263 int mask_; | 263 int mask_; |
| 264 bool overflowed_; | 264 bool overflowed_; |
| 265 | 265 |
| 266 DISALLOW_COPY_AND_ASSIGN(MarkingDeque); | 266 DISALLOW_COPY_AND_ASSIGN(MarkingDeque); |
| 267 }; | 267 }; |
| 268 | 268 |
| 269 | 269 |
| 270 class SlotsBufferAllocator { | |
| 271 public: | |
| 272 SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer); | |
| 273 void DeallocateBuffer(SlotsBuffer* buffer); | |
| 274 | |
| 275 void DeallocateChain(SlotsBuffer** buffer_address); | |
| 276 }; | |
| 277 | |
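
The bodies of these three methods live in mark-compact.cc, which is not part of this diff. A minimal sketch of what they plausibly do, using plain `malloc`/`free` as stand-ins for V8's internal memory primitives (an assumption, not the actual implementation):

```cpp
#include <stdlib.h>  // malloc/free, stand-ins for V8's allocation APIs.

SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  // One buffer spans kSizeWords pointer-sized words: a 3-word header
  // (idx_, chain_length_, next_) followed by kNumberOfElements slots.
  void* memory = malloc(SlotsBuffer::kSizeWords * kPointerSize);
  SlotsBuffer* buffer = reinterpret_cast<SlotsBuffer*>(memory);
  // Raw cast plus Initialize() instead of a constructor call -- this is
  // why SlotsBuffer's constructor is UNREACHABLE() below: instances only
  // ever come from this allocator.
  buffer->Initialize(next_buffer);  // The new buffer becomes the chain head.
  return buffer;
}

void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  free(buffer);
}

void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  // Free every buffer in the chain, then null out the caller's head pointer.
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next;
  }
  *buffer_address = NULL;
}
```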
| 278 | |
| 270 class SlotsBuffer { | 279 class SlotsBuffer { |
| 271 public: | 280 public: |
| 272 typedef Object** ObjectSlot; | 281 typedef Object** ObjectSlot; |
| 273 | 282 |
| 274 SlotsBuffer(); | 283 SlotsBuffer() { UNREACHABLE(); } |
Erik Corry, 2011/07/08 13:02:38:
I think you should use DISALLOW_IMPLICIT_CONSTRUCTORS.
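
For context, a sketch of what the reviewer's suggestion would look like. `DISALLOW_IMPLICIT_CONSTRUCTORS` is the macro from v8's src/globals.h; the expansion shown is an approximation of the macro of that era, included here as an assumption:

```cpp
// Approximate expansion from src/globals.h: the default constructor is
// declared private and never defined, so accidental construction fails at
// compile or link time instead of hitting UNREACHABLE() at runtime.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
  TypeName();                                    \
  DISALLOW_COPY_AND_ASSIGN(TypeName)

class SlotsBuffer {
 public:
  // ... Initialize(), Add(), AddTo(), etc. unchanged ...
 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(SlotsBuffer);  // Replaces UNREACHABLE().
};
```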
| 275 ~SlotsBuffer(); | 284 ~SlotsBuffer() { UNREACHABLE(); } |
| 276 | 285 |
| 277 void Clear(); | 286 void Initialize(SlotsBuffer* next_buffer) { |
| 278 void Add(ObjectSlot slot); | 287 idx_ = 0; |
| 279 void Update(); | 288 next_ = next_buffer; |
| 280 void Report(); | 289 if (next_ != NULL) { |
| 290 chain_length_ = next_->chain_length_ + 1; | |
| 291 } else { | |
| 292 chain_length_ = 1; | |
| 293 } | |
| 294 } | |
| 295 | |
| 296 void Add(ObjectSlot slot) { | |
| 297 ASSERT(0 <= idx_ && idx_ < kNumberOfElements); | |
| 298 AsArray()[idx_++] = slot; | |
| 299 ASSERT(reinterpret_cast<Address>(AsArray() + idx_) <= | |
| 300 reinterpret_cast<Address>(this) + kSizeWords * kPointerSize); | |
| 301 } | |
| 302 | |
| 303 void UpdateSlots(); | |
| 304 | |
| 305 SlotsBuffer* next() { return next_; } | |
| 306 | |
| 307 static int SizeOfChain(SlotsBuffer* buffer) { | |
| 308 if (buffer == NULL) return 0; | |
| 309 return buffer->idx_ + (buffer->chain_length_ - 1) * kNumberOfElements; | |
| 310 } | |
| 311 | |
| 312 inline bool IsFull() { | |
| 313 return idx_ == kNumberOfElements; | |
| 314 } | |
| 315 | |
| 316 static void UpdateSlotsRecordedIn(SlotsBuffer* buffer) { | |
| 317 while (buffer != NULL) { | |
| 318 buffer->UpdateSlots(); | |
| 319 buffer = buffer->next(); | |
| 320 } | |
| 321 } | |
| 322 | |
| 323 enum AdditionMode { | |
| 324 FAIL_ON_OVERFLOW, | |
| 325 IGNORE_OVERFLOW | |
| 326 }; | |
| 327 | |
| 328 static bool AddTo(SlotsBufferAllocator* allocator, | |
| 329 SlotsBuffer** buffer_address, | |
| 330 ObjectSlot slot, | |
| 331 AdditionMode mode) { | |
| 332 SlotsBuffer* buffer = *buffer_address; | |
| 333 if (buffer == NULL || buffer->IsFull()) { | |
| 334 if (mode == FAIL_ON_OVERFLOW && | |
| 335 buffer != NULL && | |
| 336 buffer->chain_length_ >= kChainLengthThreshold) { | |
| 337 allocator->DeallocateChain(buffer_address); | |
| 338 return false; | |
| 339 } | |
| 340 buffer = allocator->AllocateBuffer(buffer); | |
| 341 *buffer_address = buffer; | |
| 342 } | |
| 343 buffer->Add(slot); | |
| 344 return true; | |
| 345 } | |
| 346 | |
| 347 static const int kHeaderSizeWords = 3; | |
| 348 static const int kSizeWords = 1024; | |
| 349 static const int kNumberOfElements = kSizeWords - kHeaderSizeWords; | |
| 281 | 350 |
| 282 private: | 351 private: |
| 283 static const int kBufferSize = 1024; | 352 ObjectSlot* AsArray() { |
| 353 return reinterpret_cast<ObjectSlot*>(this + 1); | |
Erik Corry, 2011/07/08 13:02:38:
This casting seems unnecessary. You could just ha…
| 354 } | |
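
The comment above is cut off by the review UI, so the intended alternative is unknown. One plausible cast-free approach (purely an assumption, not necessarily what the reviewer meant) is a real trailing array member:

```cpp
// Hypothetical alternative to reinterpret_cast<ObjectSlot*>(this + 1):
// declare the payload as an explicit member, so the header fields plus the
// array account for all kSizeWords words and AsArray() reduces to
// `return slots_;` with no pointer arithmetic on `this`.
class SlotsBuffer {
  // ... idx_, chain_length_, next_ as above ...
  ObjectSlot slots_[kNumberOfElements];
};
```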
| 284 | 355 |
| 285 List<ObjectSlot*> buffers_; | 356 static const int kChainLengthThreshold = 6; |
| 286 ObjectSlot* buffer_; | |
| 287 | 357 |
| 288 int idx_; | 358 intptr_t idx_; |
| 289 int buffer_idx_; | 359 intptr_t chain_length_; |
| 360 SlotsBuffer* next_; | |
| 290 }; | 361 }; |
| 291 | 362 |
| 292 | 363 |
| 364 STATIC_ASSERT(SlotsBuffer::kHeaderSizeWords * kPointerSize == | |
| 365 sizeof(SlotsBuffer)); | |
| 366 | |
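
Pulling the pieces together, a hypothetical usage sketch; the allocator instance, chain head, and `some_slot` are stand-ins, not code from this patch:

```cpp
SlotsBufferAllocator allocator;
SlotsBuffer* chain = NULL;   // Empty chain: SizeOfChain(NULL) == 0.
Object** some_slot = NULL;   // Stand-in for a real slot address.

// AddTo lazily allocates the first buffer and pushes a fresh buffer onto
// the head of the chain whenever the current one fills up; each buffer
// holds kNumberOfElements == 1024 - 3 == 1021 slots.
bool recorded = SlotsBuffer::AddTo(&allocator, &chain, some_slot,
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
if (!recorded) {
  // FAIL_ON_OVERFLOW: once the chain already holds kChainLengthThreshold
  // (6) full buffers, AddTo frees the whole chain and returns false so
  // the caller can demote the page (see RecordSlot below).
}

// SizeOfChain = head->idx_ + (chain_length_ - 1) * kNumberOfElements; e.g.
// a 3-buffer chain whose head holds 100 entries: 100 + 2 * 1021 == 2142.
int total = SlotsBuffer::SizeOfChain(chain);
```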
| 367 | |
| 293 // ------------------------------------------------------------------------- | 368 // ------------------------------------------------------------------------- |
| 294 // Mark-Compact collector | 369 // Mark-Compact collector |
| 295 class MarkCompactCollector { | 370 class MarkCompactCollector { |
| 296 public: | 371 public: |
| 297 // Type of functions to compute forwarding addresses of objects in | 372 // Type of functions to compute forwarding addresses of objects in |
| 298 // compacted spaces. Given an object and its size, return a (non-failure) | 373 // compacted spaces. Given an object and its size, return a (non-failure) |
| 299 // Object* that will be the object after forwarding. There is a separate | 374 // Object* that will be the object after forwarding. There is a separate |
| 300 // allocation function for each (compactable) space based on the location | 375 // allocation function for each (compactable) space based on the location |
| 301 // of the object before compaction. | 376 // of the object before compaction. |
| 302 typedef MaybeObject* (*AllocationFunction)(Heap* heap, | 377 typedef MaybeObject* (*AllocationFunction)(Heap* heap, |
| (...skipping 72 matching lines...) | |
| 375 enum SweeperType { | 450 enum SweeperType { |
| 376 CONSERVATIVE, | 451 CONSERVATIVE, |
| 377 LAZY_CONSERVATIVE, | 452 LAZY_CONSERVATIVE, |
| 378 PRECISE | 453 PRECISE |
| 379 }; | 454 }; |
| 380 | 455 |
| 381 // Sweep a single page from the given space conservatively. | 456 // Sweep a single page from the given space conservatively. |
| 382 // Return the number of reclaimed bytes. | 457 // Return the number of reclaimed bytes. |
| 383 static int SweepConservatively(PagedSpace* space, Page* p); | 458 static int SweepConservatively(PagedSpace* space, Page* p); |
| 384 | 459 |
| 385 INLINE(static bool IsOnEvacuationCandidateOrInNewSpace(Object** anchor)) { | 460 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { |
| 386 return Page::FromAddress(reinterpret_cast<Address>(anchor))-> | 461 return Page::FromAddress(reinterpret_cast<Address>(anchor))-> |
| 387 IsEvacuationCandidateOrNewSpace(); | 462 ShouldSkipEvacuationSlotRecording(); |
| 388 } | 463 } |
| 389 | 464 |
| 390 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { | 465 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { |
| 391 return Page::FromAddress(reinterpret_cast<Address>(obj))-> | 466 return Page::FromAddress(reinterpret_cast<Address>(obj))-> |
| 392 IsEvacuationCandidate(); | 467 IsEvacuationCandidate(); |
| 393 } | 468 } |
| 394 | 469 |
| 395 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object)) { | 470 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object)) { |
| 396 if (IsOnEvacuationCandidate(object) && | 471 Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); |
| 397 !IsOnEvacuationCandidateOrInNewSpace(anchor_slot)) { | 472 if (object_page->IsEvacuationCandidate() && |
| 398 slots_buffer_.Add(slot); | 473 !ShouldSkipEvacuationSlotRecording(anchor_slot)) { |
| 474 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, | |
| 475 object_page->slots_buffer_address(), | |
| 476 slot, | |
| 477 SlotsBuffer::FAIL_ON_OVERFLOW)) { | |
| 478 if (FLAG_trace_fragmentation) { | |
| 479 PrintF("Page %p is too popular. Disabling evacuation.\n", | |
| 480 reinterpret_cast<void*>(object_page)); | |
| 481 } | |
| 482 // TODO(gc) If all evacuation candidates are too popular we | |
| 483 // should stop slots recording entirely. | |
| 484 object_page->ClearEvacuationCandidate(); | |
| 485 if (object_page->owner()->identity() == OLD_DATA_SPACE) { | |
Erik Corry, 2011/07/08 13:02:38:
Comment required!
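
A plausible comment for this branch, offered as an assumption about the intent rather than the author's words. It rests on the usual V8 space semantics: data-space pages hold no embedded pointers, while pointer-space pages skipped slot recording while they were candidates:

```cpp
if (object_page->owner()->identity() == OLD_DATA_SPACE) {
  // Data pages contain no pointer fields, so nothing inside the page needs
  // later updating; simply stop treating it as an evacuation candidate.
  evacuation_candidates_.RemoveElement(object_page);
} else {
  // While this page was a candidate, slots inside it were not recorded
  // (ShouldSkipEvacuationSlotRecording), so its outgoing pointers must be
  // rescanned and updated when evacuation actually runs.
  object_page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
```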
| 486 evacuation_candidates_.RemoveElement(object_page); | |
| 487 } else { | |
| 488 object_page->SetFlag(Page::RESCAN_ON_EVACUATION); | |
| 489 } | |
| 490 } | |
| 399 } | 491 } |
| 400 } | 492 } |
| 401 | 493 |
| 402 void MigrateObject(Address dst, | 494 void MigrateObject(Address dst, |
| 403 Address src, | 495 Address src, |
| 404 int size, | 496 int size, |
| 405 AllocationSpace to_old_space); | 497 AllocationSpace to_old_space); |
| 406 | 498 |
| 407 bool TryPromoteObject(HeapObject* object, int object_size); | 499 bool TryPromoteObject(HeapObject* object, int object_size); |
| 408 | 500 |
| (...skipping 21 matching lines...) | |
| 430 bool sweep_precisely_; | 522 bool sweep_precisely_; |
| 431 | 523 |
| 432 // True if we are collecting slots to perform evacuation from evacuation | 524 // True if we are collecting slots to perform evacuation from evacuation |
| 433 // candidates. | 525 // candidates. |
| 434 bool compacting_; | 526 bool compacting_; |
| 435 | 527 |
| 436 // A pointer to the current stack-allocated GC tracer object during a full | 528 // A pointer to the current stack-allocated GC tracer object during a full |
| 437 // collection (NULL before and after). | 529 // collection (NULL before and after). |
| 438 GCTracer* tracer_; | 530 GCTracer* tracer_; |
| 439 | 531 |
| 440 SlotsBuffer slots_buffer_; | 532 SlotsBufferAllocator slots_buffer_allocator_; |
| 533 | |
| 534 SlotsBuffer* migration_slots_buffer_; | |
| 441 | 535 |
| 442 // Finishes GC, performs heap verification if enabled. | 536 // Finishes GC, performs heap verification if enabled. |
| 443 void Finish(); | 537 void Finish(); |
| 444 | 538 |
| 445 // ----------------------------------------------------------------------- | 539 // ----------------------------------------------------------------------- |
| 446 // Phase 1: Marking live objects. | 540 // Phase 1: Marking live objects. |
| 447 // | 541 // |
| 448 // Before: The heap has been prepared for garbage collection by | 542 // Before: The heap has been prepared for garbage collection by |
| 449 // MarkCompactCollector::Prepare() and is otherwise in its | 543 // MarkCompactCollector::Prepare() and is otherwise in its |
| 450 // normal state. | 544 // normal state. |
| (...skipping 159 matching lines...) | |
| 610 | 704 |
| 611 List<Page*> evacuation_candidates_; | 705 List<Page*> evacuation_candidates_; |
| 612 | 706 |
| 613 friend class Heap; | 707 friend class Heap; |
| 614 }; | 708 }; |
| 615 | 709 |
| 616 | 710 |
| 617 } } // namespace v8::internal | 711 } } // namespace v8::internal |
| 618 | 712 |
| 619 #endif // V8_MARK_COMPACT_H_ | 713 #endif // V8_MARK_COMPACT_H_ |