OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
(...skipping 581 matching lines...)
592 return NULL; | 592 return NULL; |
593 } | 593 } |
594 | 594 |
595 | 595 |
596 // Returns zero for pages that have so little fragmentation that it is not | 596 // Returns zero for pages that have so little fragmentation that it is not |
597 // worth defragmenting them. Otherwise a positive integer that gives an | 597 // worth defragmenting them. Otherwise a positive integer that gives an |
598 // estimate of fragmentation on an arbitrary scale. | 598 // estimate of fragmentation on an arbitrary scale. |
599 static int FreeListFragmentation(PagedSpace* space, Page* p) { | 599 static int FreeListFragmentation(PagedSpace* space, Page* p) { |
600 // If page was not swept then there are no free list items on it. | 600 // If page was not swept then there are no free list items on it. |
601 if (!p->WasSwept()) { | 601 if (!p->WasSwept()) { |
602 if (FLAG_trace_fragmentation) { | 602 if (FLAG_trace_fragmentation_verbose) { |
603 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), | 603 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), |
604 AllocationSpaceName(space->identity()), p->LiveBytes()); | 604 AllocationSpaceName(space->identity()), p->LiveBytes()); |
605 } | 605 } |
606 return 0; | 606 return FLAG_always_compact ? 1 : 0; |
607 } | 607 } |
608 | 608 |
609 PagedSpace::SizeStats sizes; | 609 PagedSpace::SizeStats sizes; |
610 space->ObtainFreeListStatistics(p, &sizes); | 610 space->ObtainFreeListStatistics(p, &sizes); |
611 | 611 |
612 intptr_t ratio; | 612 intptr_t ratio; |
613 intptr_t ratio_threshold; | 613 intptr_t ratio_threshold; |
614 intptr_t area_size = space->AreaSize(); | 614 intptr_t area_size = space->AreaSize(); |
615 if (space->identity() == CODE_SPACE) { | 615 if (space->identity() == CODE_SPACE) { |
616 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; | 616 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; |
617 ratio_threshold = 10; | 617 ratio_threshold = 10; |
618 } else { | 618 } else { |
619 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; | 619 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; |
620 ratio_threshold = 15; | 620 ratio_threshold = 15; |
621 } | 621 } |
622 | 622 |
623 if (FLAG_trace_fragmentation) { | 623 if (FLAG_trace_fragmentation_verbose) { |
624 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", | 624 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", |
625 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), | 625 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), |
626 static_cast<int>(sizes.small_size_), | 626 static_cast<int>(sizes.small_size_), |
627 static_cast<double>(sizes.small_size_ * 100) / area_size, | 627 static_cast<double>(sizes.small_size_ * 100) / area_size, |
628 static_cast<int>(sizes.medium_size_), | 628 static_cast<int>(sizes.medium_size_), |
629 static_cast<double>(sizes.medium_size_ * 100) / area_size, | 629 static_cast<double>(sizes.medium_size_ * 100) / area_size, |
630 static_cast<int>(sizes.large_size_), | 630 static_cast<int>(sizes.large_size_), |
631 static_cast<double>(sizes.large_size_ * 100) / area_size, | 631 static_cast<double>(sizes.large_size_ * 100) / area_size, |
632 static_cast<int>(sizes.huge_size_), | 632 static_cast<int>(sizes.huge_size_), |
633 static_cast<double>(sizes.huge_size_ * 100) / area_size, | 633 static_cast<double>(sizes.huge_size_ * 100) / area_size, |
(...skipping 55 matching lines...)
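Note on the hunk above: for intuition about the ratio it computes, here is a hedged, self-contained sketch of the CODE_SPACE branch with made-up free-list totals. All sizes are invented, and the threshold comparison itself sits in the lines elided from this hunk.

    // Standalone illustration of the CODE_SPACE fragmentation ratio above.
    // Every number here is invented; this is not part of the CL.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t area_size = 1024 * 1024;  // hypothetical page area (1 MB)
      const intptr_t medium_size = 16 * 1024;  // hypothetical free-list totals
      const intptr_t large_size = 32 * 1024;
      // CODE_SPACE weighting: medium free chunks count 10x, large chunks 2x.
      const intptr_t ratio =
          (medium_size * 10 + large_size * 2) * 100 / area_size;
      // ratio == 21 here, above the CODE_SPACE ratio_threshold of 10, so the
      // page would be scored as worth defragmenting.
      std::printf("ratio = %d\n", static_cast<int>(ratio));
      return 0;
    }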
689 | 689 |
690 | 690 |
691 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { | 691 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { |
692 // If over-usage is very high (more than a third of the space), we | 692 // If over-usage is very high (more than a third of the space), we |
693 // try to free all mostly empty pages. We expect that almost empty | 693 // try to free all mostly empty pages. We expect that almost empty |
694 // pages are even easier to compact so bump the limit even more. | 694 // pages are even easier to compact so bump the limit even more. |
695 mode = REDUCE_MEMORY_FOOTPRINT; | 695 mode = REDUCE_MEMORY_FOOTPRINT; |
696 max_evacuation_candidates *= 2; | 696 max_evacuation_candidates *= 2; |
697 } | 697 } |
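Note: as a quick check of the REDUCE_MEMORY_FOOTPRINT trigger above, a hedged worked example with invented numbers (a 1 MB AreaSize is an assumption, not taken from this CL):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t area_size = 1024 * 1024;         // assumed AreaSize()
      const intptr_t reserved = 90 * area_size;       // invented reservation
      const intptr_t over_reserved = 40 * area_size;  // invented waste estimate
      // Mirrors the condition above: more than a third of the space is waste
      // AND at least two whole pages could be reclaimed.
      const bool reduce_footprint =
          over_reserved > reserved / 3 && over_reserved >= 2 * area_size;
      std::printf("REDUCE_MEMORY_FOOTPRINT = %s\n",
                  reduce_footprint ? "yes" : "no");  // prints "yes"
      return 0;
    }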
698 | 698 |
| 699 if (FLAG_always_compact) { |
| 700 max_evacuation_candidates = kMaxMaxEvacuationCandidates; |
| 701 } |
| 702 |
699 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { | 703 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { |
700 PrintF( | 704 PrintF( |
701 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " | 705 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " |
702 "evacuation candidate limit: %d\n", | 706 "evacuation candidate limit: %d\n", |
703 static_cast<double>(over_reserved) / MB, | 707 static_cast<double>(over_reserved) / MB, |
704 static_cast<double>(reserved) / MB, | 708 static_cast<double>(reserved) / MB, |
705 static_cast<int>(kFreenessThreshold), max_evacuation_candidates); | 709 static_cast<int>(kFreenessThreshold), max_evacuation_candidates); |
706 } | 710 } |
707 | 711 |
708 intptr_t estimated_release = 0; | 712 intptr_t estimated_release = 0; |
709 | 713 |
710 Candidate candidates[kMaxMaxEvacuationCandidates]; | 714 Candidate candidates[kMaxMaxEvacuationCandidates]; |
711 | 715 |
| 716 if (FLAG_trace_fragmentation && |
| 717 max_evacuation_candidates >= kMaxMaxEvacuationCandidates) { |
| 718 PrintF("Hit max page compaction limit of %d pages\n", |
| 719 kMaxMaxEvacuationCandidates); |
| 720 } |
712 max_evacuation_candidates = | 721 max_evacuation_candidates = |
713 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); | 722 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); |
714 | 723 |
715 int count = 0; | 724 int count = 0; |
716 int fragmentation = 0; | 725 int fragmentation = 0; |
717 Candidate* least = NULL; | 726 Candidate* least = NULL; |
718 | 727 |
719 PageIterator it(space); | 728 PageIterator it(space); |
720 while (it.has_next()) { | 729 while (it.has_next()) { |
721 Page* p = it.next(); | 730 Page* p = it.next(); |
722 if (p->NeverEvacuate()) continue; | 731 if (p->NeverEvacuate()) continue; |
723 | 732 |
724 // Invariant: Evacuation candidates are only created when marking is | 733 // Invariant: Evacuation candidates are only created when marking is |
725 // started. At the end of a GC all evacuation candidates are cleared and | 734 // started. At the end of a GC all evacuation candidates are cleared and |
726 // their slot buffers are released. | 735 // their slot buffers are released. |
727 CHECK(!p->IsEvacuationCandidate()); | 736 CHECK(!p->IsEvacuationCandidate()); |
728 CHECK(p->slots_buffer() == NULL); | 737 CHECK(p->slots_buffer() == NULL); |
729 | 738 |
730 if (FLAG_stress_compaction) { | 739 if (FLAG_stress_compaction) { |
731 unsigned int counter = space->heap()->ms_count(); | 740 unsigned int counter = space->heap()->ms_count(); |
732 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; | 741 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; |
733 if ((counter & 1) == (page_number & 1)) fragmentation = 1; | 742 if ((counter & 1) == (page_number & 1)) fragmentation = 1; |
734 } else if (mode == REDUCE_MEMORY_FOOTPRINT) { | 743 } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) { |
735 // Don't try to release too many pages. | 744 // Don't try to release too many pages. |
736 if (estimated_release >= over_reserved) { | 745 if (estimated_release >= over_reserved) { |
737 continue; | 746 continue; |
738 } | 747 } |
739 | 748 |
740 intptr_t free_bytes = 0; | 749 intptr_t free_bytes = 0; |
741 | 750 |
742 if (!p->WasSwept()) { | 751 if (!p->WasSwept()) { |
743 free_bytes = (p->area_size() - p->LiveBytes()); | 752 free_bytes = (p->area_size() - p->LiveBytes()); |
744 } else { | 753 } else { |
745 PagedSpace::SizeStats sizes; | 754 PagedSpace::SizeStats sizes; |
746 space->ObtainFreeListStatistics(p, &sizes); | 755 space->ObtainFreeListStatistics(p, &sizes); |
747 free_bytes = sizes.Total(); | 756 free_bytes = sizes.Total(); |
748 } | 757 } |
749 | 758 |
750 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); | 759 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); |
751 | 760 |
752 if (free_pct >= kFreenessThreshold) { | 761 if (free_pct >= kFreenessThreshold) { |
753 estimated_release += free_bytes; | 762 estimated_release += free_bytes; |
754 fragmentation = free_pct; | 763 fragmentation = free_pct; |
755 } else { | 764 } else { |
756 fragmentation = 0; | 765 fragmentation = 0; |
757 } | 766 } |
758 | 767 |
759 if (FLAG_trace_fragmentation) { | 768 if (FLAG_trace_fragmentation_verbose) { |
760 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), | 769 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), |
761 AllocationSpaceName(space->identity()), | 770 AllocationSpaceName(space->identity()), |
762 static_cast<int>(free_bytes), | 771 static_cast<int>(free_bytes), |
763 static_cast<double>(free_bytes * 100) / p->area_size(), | 772 static_cast<double>(free_bytes * 100) / p->area_size(), |
764 (fragmentation > 0) ? "[fragmented]" : ""); | 773 (fragmentation > 0) ? "[fragmented]" : ""); |
765 } | 774 } |
766 } else { | 775 } else { |
767 fragmentation = FreeListFragmentation(space, p); | 776 fragmentation = FreeListFragmentation(space, p); |
768 } | 777 } |
769 | 778 |
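Note on the candidate loop above: it scores unswept pages by area minus live bytes and swept pages by their free-list totals. A hedged standalone sketch of the unswept-page percentage test follows; kFreenessThreshold is defined outside this hunk, so the 50 used here is only a stand-in.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t area_size = 1000000;  // hypothetical p->area_size()
      const intptr_t live_bytes = 120000;  // hypothetical p->LiveBytes()
      const int kFreenessThreshold = 50;   // stand-in for the real constant

      // Unswept page: free space is estimated as area minus live bytes.
      const intptr_t free_bytes = area_size - live_bytes;
      const int free_pct = static_cast<int>(free_bytes * 100 / area_size);
      // 88% free clears the assumed threshold, so the page would become a
      // candidate and its free bytes would count toward estimated_release.
      std::printf("free_pct = %d%%, candidate = %s\n", free_pct,
                  free_pct >= kFreenessThreshold ? "yes" : "no");
      return 0;
    }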
(...skipping 2465 matching lines...)
3235 | 3244 |
3236 // Clear marking bits for current cell. | 3245 // Clear marking bits for current cell. |
3237 *cell = 0; | 3246 *cell = 0; |
3238 } | 3247 } |
3239 p->ResetLiveBytes(); | 3248 p->ResetLiveBytes(); |
3240 } | 3249 } |
3241 | 3250 |
3242 | 3251 |
3243 void MarkCompactCollector::EvacuatePages() { | 3252 void MarkCompactCollector::EvacuatePages() { |
3244 int npages = evacuation_candidates_.length(); | 3253 int npages = evacuation_candidates_.length(); |
| 3254 int abandoned_pages = 0; |
3245 for (int i = 0; i < npages; i++) { | 3255 for (int i = 0; i < npages; i++) { |
3246 Page* p = evacuation_candidates_[i]; | 3256 Page* p = evacuation_candidates_[i]; |
3247 DCHECK(p->IsEvacuationCandidate() || | 3257 DCHECK(p->IsEvacuationCandidate() || |
3248 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3258 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3249 DCHECK(static_cast<int>(p->parallel_sweeping()) == | 3259 DCHECK(static_cast<int>(p->parallel_sweeping()) == |
3250 MemoryChunk::SWEEPING_DONE); | 3260 MemoryChunk::SWEEPING_DONE); |
3251 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3261 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3252 // Allocate emergency memory for the case when compaction fails due to out | 3262 // Allocate emergency memory for the case when compaction fails due to out |
3253 // of memory. | 3263 // of memory. |
3254 if (!space->HasEmergencyMemory()) { | 3264 if (!space->HasEmergencyMemory()) { |
3255 space->CreateEmergencyMemory(); | 3265 space->CreateEmergencyMemory(); |
3256 } | 3266 } |
3257 if (p->IsEvacuationCandidate()) { | 3267 if (p->IsEvacuationCandidate()) { |
3258 // During compaction we might have to request a new page. Check that we | 3268 // During compaction we might have to request a new page. Check that we |
3259 // have an emergency page and the space still has room for that. | 3269 // have an emergency page and the space still has room for that. |
3260 if (space->HasEmergencyMemory() && space->CanExpand()) { | 3270 if (space->HasEmergencyMemory() || space->CanExpand()) { |
3261 EvacuateLiveObjectsFromPage(p); | 3271 EvacuateLiveObjectsFromPage(p); |
3262 // Unlink the page from the list of pages here. We must not iterate | 3272 // Unlink the page from the list of pages here. We must not iterate |
3263 // over that page later (e.g. when scan on scavenge pages are | 3273 // over that page later (e.g. when scan on scavenge pages are |
3264 // processed). The page itself will be freed later and is still | 3274 // processed). The page itself will be freed later and is still |
3265 // reachable from the evacuation candidates list. | 3275 // reachable from the evacuation candidates list. |
3266 p->Unlink(); | 3276 p->Unlink(); |
3267 } else { | 3277 } else { |
3268 // Without room for expansion evacuation is not guaranteed to succeed. | 3278 // Without room for expansion evacuation is not guaranteed to succeed. |
3269 // Pessimistically abandon unevacuated pages. | 3279 // Pessimistically abandon unevacuated pages. |
3270 for (int j = i; j < npages; j++) { | 3280 for (int j = i; j < npages; j++) { |
3271 Page* page = evacuation_candidates_[j]; | 3281 Page* page = evacuation_candidates_[j]; |
3272 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); | 3282 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); |
3273 page->ClearEvacuationCandidate(); | 3283 page->ClearEvacuationCandidate(); |
3274 page->SetFlag(Page::RESCAN_ON_EVACUATION); | 3284 page->SetFlag(Page::RESCAN_ON_EVACUATION); |
3275 } | 3285 } |
| 3286 abandoned_pages = npages - i; |
3276 break; | 3287 break; |
3277 } | 3288 } |
3278 } | 3289 } |
3279 } | 3290 } |
3280 if (npages > 0) { | 3291 if (npages > 0) { |
3281 // Release emergency memory. | 3292 // Release emergency memory. |
3282 PagedSpaces spaces(heap()); | 3293 PagedSpaces spaces(heap()); |
3283 for (PagedSpace* space = spaces.next(); space != NULL; | 3294 for (PagedSpace* space = spaces.next(); space != NULL; |
3284 space = spaces.next()) { | 3295 space = spaces.next()) { |
3285 if (space->HasEmergencyMemory()) { | 3296 if (space->HasEmergencyMemory()) { |
3286 space->FreeEmergencyMemory(); | 3297 space->FreeEmergencyMemory(); |
3287 } | 3298 } |
3288 } | 3299 } |
| 3300 if (FLAG_trace_fragmentation) { |
| 3301 if (abandoned_pages != 0) { |
| 3302 PrintF( |
| 3303 " Abandon %d out of %d page defragmentations due to lack of " |
| 3304 "memory\n", |
| 3305 abandoned_pages, npages); |
| 3306 } else { |
| 3307 PrintF(" Defragmented %d pages\n", npages); |
| 3308 } |
| 3309 } |
3289 } | 3310 } |
3290 } | 3311 } |
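Note: the new abandoned_pages bookkeeping above drives the trace lines added at the end of EvacuatePages(). A toy sketch of the accounting, with illustrative names and numbers only (not V8 API):

    #include <cstdio>

    int main() {
      // Suppose candidate 3 hits the out-of-memory path: the loop above then
      // downgrades the whole tail i..npages-1 and records it as abandoned.
      const int npages = 8;
      const bool can_evacuate[npages] = {true, true, true};  // rest are false
      int abandoned_pages = 0;
      for (int i = 0; i < npages; i++) {
        if (!can_evacuate[i]) {
          abandoned_pages = npages - i;  // pages i..npages-1 are abandoned
          break;
        }
      }
      // Matches the shape of the new trace output above.
      std::printf(" Abandon %d out of %d page defragmentations due to lack "
                  "of memory\n",
                  abandoned_pages, npages);
      return 0;
    }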
3291 | 3312 |
3292 | 3313 |
3293 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3314 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3294 public: | 3315 public: |
3295 virtual Object* RetainAs(Object* object) { | 3316 virtual Object* RetainAs(Object* object) { |
3296 if (object->IsHeapObject()) { | 3317 if (object->IsHeapObject()) { |
3297 HeapObject* heap_object = HeapObject::cast(object); | 3318 HeapObject* heap_object = HeapObject::cast(object); |
3298 MapWord map_word = heap_object->map_word(); | 3319 MapWord map_word = heap_object->map_word(); |
(...skipping 323 matching lines...)
3622 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), | 3643 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), |
3623 &Heap::ScavengeStoreBufferCallback); | 3644 &Heap::ScavengeStoreBufferCallback); |
3624 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 3645 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
3625 } | 3646 } |
3626 | 3647 |
3627 { | 3648 { |
3628 GCTracer::Scope gc_scope(heap()->tracer(), | 3649 GCTracer::Scope gc_scope(heap()->tracer(), |
3629 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); | 3650 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); |
3630 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_, | 3651 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_, |
3631 code_slots_filtering_required); | 3652 code_slots_filtering_required); |
3632 if (FLAG_trace_fragmentation) { | 3653 if (FLAG_trace_fragmentation_verbose) { |
3633 PrintF(" migration slots buffer: %d\n", | 3654 PrintF(" migration slots buffer: %d\n", |
3634 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); | 3655 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
3635 } | 3656 } |
3636 | 3657 |
3637 if (compacting_ && was_marked_incrementally_) { | 3658 if (compacting_ && was_marked_incrementally_) { |
3638 // It's difficult to filter out slots recorded for large objects. | 3659 // It's difficult to filter out slots recorded for large objects. |
3639 LargeObjectIterator it(heap_->lo_space()); | 3660 LargeObjectIterator it(heap_->lo_space()); |
3640 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 3661 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
3641 // LargeObjectSpace is not swept yet thus we have to skip | 3662 // LargeObjectSpace is not swept yet thus we have to skip |
3642 // dead objects explicitly. | 3663 // dead objects explicitly. |
(...skipping 14 matching lines...)
3657 heap()->tracer(), | 3678 heap()->tracer(), |
3658 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3679 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); |
3659 for (int i = 0; i < npages; i++) { | 3680 for (int i = 0; i < npages; i++) { |
3660 Page* p = evacuation_candidates_[i]; | 3681 Page* p = evacuation_candidates_[i]; |
3661 DCHECK(p->IsEvacuationCandidate() || | 3682 DCHECK(p->IsEvacuationCandidate() || |
3662 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3683 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3663 | 3684 |
3664 if (p->IsEvacuationCandidate()) { | 3685 if (p->IsEvacuationCandidate()) { |
3665 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(), | 3686 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(), |
3666 code_slots_filtering_required); | 3687 code_slots_filtering_required); |
3667 if (FLAG_trace_fragmentation) { | 3688 if (FLAG_trace_fragmentation_verbose) { |
3668 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3689 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), |
3669 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3690 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
3670 } | 3691 } |
3671 | 3692 |
3672 // Important: skip list should be cleared only after roots were updated | 3693 // Important: skip list should be cleared only after roots were updated |
3673 // because root iteration traverses the stack and might have to find | 3694 // because root iteration traverses the stack and might have to find |
3674 // code objects from non-updated pc pointing into evacuation candidate. | 3695 // code objects from non-updated pc pointing into evacuation candidate. |
3675 SkipList* list = p->skip_list(); | 3696 SkipList* list = p->skip_list(); |
3676 if (list != NULL) list->Clear(); | 3697 if (list != NULL) list->Clear(); |
3677 } else { | 3698 } else { |
(...skipping 918 matching lines...)
4596 SlotsBuffer* buffer = *buffer_address; | 4617 SlotsBuffer* buffer = *buffer_address; |
4597 while (buffer != NULL) { | 4618 while (buffer != NULL) { |
4598 SlotsBuffer* next_buffer = buffer->next(); | 4619 SlotsBuffer* next_buffer = buffer->next(); |
4599 DeallocateBuffer(buffer); | 4620 DeallocateBuffer(buffer); |
4600 buffer = next_buffer; | 4621 buffer = next_buffer; |
4601 } | 4622 } |
4602 *buffer_address = NULL; | 4623 *buffer_address = NULL; |
4603 } | 4624 } |
4604 } | 4625 } |
4605 } // namespace v8::internal | 4626 } // namespace v8::internal |
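Note: a final hedged restatement of the chain teardown just above, with a generic node type rather than V8's SlotsBuffer. The point of the pattern is that the next pointer must be captured before the node is freed, and the head must be nulled so no dangling chain remains.

    #include <cstdlib>

    struct Node {
      Node* next;
    };

    // Same shape as SlotsBufferAllocator::DeallocateChain above.
    void DeallocateChain(Node** head) {
      Node* node = *head;
      while (node != NULL) {
        Node* next_node = node->next;  // save before freeing this node
        std::free(node);
        node = next_node;
      }
      *head = NULL;  // leave no dangling head pointer
    }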