| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
| 10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
| (...skipping 566 matching lines...) |
| 577 CodeRange* code_range = heap_->memory_allocator()->code_range(); | 577 CodeRange* code_range = heap_->memory_allocator()->code_range(); |
| 578 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); | 578 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); |
| 579 if (!code_range->UncommitRawMemory(start, length)) return false; | 579 if (!code_range->UncommitRawMemory(start, length)) return false; |
| 580 } | 580 } |
| 581 } | 581 } |
| 582 | 582 |
| 583 area_end_ = area_start_ + requested; | 583 area_end_ = area_start_ + requested; |
| 584 return true; | 584 return true; |
| 585 } | 585 } |
| 586 | 586 |
| 587 size_t MemoryChunk::CommittedPhysicalMemory() { | |
| 588 if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE) | |
| 589 return size(); | |
| 590 return high_water_mark_.Value(); | |
| 591 } | |
| 592 | 587 |
| 593 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 588 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 594 MemoryChunk* other_next = other->next_chunk(); | 589 MemoryChunk* other_next = other->next_chunk(); |
| 595 | 590 |
| 596 set_next_chunk(other_next); | 591 set_next_chunk(other_next); |
| 597 set_prev_chunk(other); | 592 set_prev_chunk(other); |
| 598 other_next->set_prev_chunk(this); | 593 other_next->set_prev_chunk(this); |
| 599 other->set_next_chunk(this); | 594 other->set_next_chunk(this); |
| 600 } | 595 } |
| 601 | 596 |
| (...skipping 147 matching lines...) |
| 749 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 744 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
| 750 executable, owner, &reservation); | 745 executable, owner, &reservation); |
| 751 } | 746 } |
| 752 | 747 |
| 753 | 748 |
| 754 void Page::ResetFreeListStatistics() { | 749 void Page::ResetFreeListStatistics() { |
| 755 wasted_memory_ = 0; | 750 wasted_memory_ = 0; |
| 756 available_in_free_list_ = 0; | 751 available_in_free_list_ = 0; |
| 757 } | 752 } |
| 758 | 753 |
| 759 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, | |
| 760 Address start_free) { | |
| 761 // We do not allow partial shrink for code. | |
| 762 DCHECK(chunk->executable() == NOT_EXECUTABLE); | |
| 763 | |
| 764 intptr_t size; | |
| 765 base::VirtualMemory* reservation = chunk->reserved_memory(); | |
| 766 DCHECK(reservation->IsReserved()); | |
| 767 size = static_cast<intptr_t>(reservation->size()); | |
| 768 | |
| 769 size_t to_free_size = size - (start_free - chunk->address()); | |
| 770 | |
| 771 DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size)); | |
| 772 size_.Increment(-static_cast<intptr_t>(to_free_size)); | |
| 773 isolate_->counters()->memory_allocated()->Decrement( | |
| 774 static_cast<int>(to_free_size)); | |
| 775 chunk->set_size(size - to_free_size); | |
| 776 | |
| 777 reservation->ReleasePartial(start_free); | |
| 778 } | |
| 779 | |
| 780 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { | 754 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
| 781 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 755 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| 782 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 756 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 783 | 757 |
| 784 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 758 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| 785 chunk->IsEvacuationCandidate()); | 759 chunk->IsEvacuationCandidate()); |
| 786 | 760 |
| 787 intptr_t size; | 761 intptr_t size; |
| 788 base::VirtualMemory* reservation = chunk->reserved_memory(); | 762 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 789 if (reservation->IsReserved()) { | 763 if (reservation->IsReserved()) { |
| (...skipping 2113 matching lines...) |
| 2903 #endif | 2877 #endif |
| 2904 } | 2878 } |
| 2905 | 2879 |
| 2906 // ----------------------------------------------------------------------------- | 2880 // ----------------------------------------------------------------------------- |
| 2907 // MapSpace implementation | 2881 // MapSpace implementation |
| 2908 | 2882 |
| 2909 #ifdef VERIFY_HEAP | 2883 #ifdef VERIFY_HEAP |
| 2910 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } | 2884 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
| 2911 #endif | 2885 #endif |
| 2912 | 2886 |
| 2913 Address LargePage::GetAddressToShrink() { | |
| 2914 HeapObject* object = GetObject(); | |
| 2915 if (executable() == EXECUTABLE) { | |
| 2916 return 0; | |
| 2917 } | |
| 2918 size_t used_size = RoundUp((object->address() - address()) + object->Size(), | |
| 2919 base::OS::CommitPageSize()); | |
| 2920 if (used_size < CommittedPhysicalMemory()) { | |
| 2921 return address() + used_size; | |
| 2922 } | |
| 2923 return 0; | |
| 2924 } | |
| 2925 | 2887 |
| 2926 // ----------------------------------------------------------------------------- | 2888 // ----------------------------------------------------------------------------- |
| 2927 // LargeObjectIterator | 2889 // LargeObjectIterator |
| 2928 | 2890 |
| 2929 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 2891 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 2930 current_ = space->first_page_; | 2892 current_ = space->first_page_; |
| 2931 } | 2893 } |
| 2932 | 2894 |
| 2933 | 2895 |
| 2934 HeapObject* LargeObjectIterator::Next() { | 2896 HeapObject* LargeObjectIterator::Next() { |
| (...skipping 130 matching lines...) |
| 3065 HeapObject* object = current->GetObject(); | 3027 HeapObject* object = current->GetObject(); |
| 3066 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3028 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 3067 DCHECK(Marking::IsBlack(mark_bit)); | 3029 DCHECK(Marking::IsBlack(mark_bit)); |
| 3068 Marking::BlackToWhite(mark_bit); | 3030 Marking::BlackToWhite(mark_bit); |
| 3069 Page::FromAddress(object->address())->ResetProgressBar(); | 3031 Page::FromAddress(object->address())->ResetProgressBar(); |
| 3070 Page::FromAddress(object->address())->ResetLiveBytes(); | 3032 Page::FromAddress(object->address())->ResetLiveBytes(); |
| 3071 current = current->next_page(); | 3033 current = current->next_page(); |
| 3072 } | 3034 } |
| 3073 } | 3035 } |
| 3074 | 3036 |
| 3037 |
| 3075 void LargeObjectSpace::FreeUnmarkedObjects() { | 3038 void LargeObjectSpace::FreeUnmarkedObjects() { |
| 3076 LargePage* previous = NULL; | 3039 LargePage* previous = NULL; |
| 3077 LargePage* current = first_page_; | 3040 LargePage* current = first_page_; |
| 3078 while (current != NULL) { | 3041 while (current != NULL) { |
| 3079 HeapObject* object = current->GetObject(); | 3042 HeapObject* object = current->GetObject(); |
| 3080 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3043 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 3081 DCHECK(!Marking::IsGrey(mark_bit)); | 3044 DCHECK(!Marking::IsGrey(mark_bit)); |
| 3082 if (Marking::IsBlack(mark_bit)) { | 3045 if (Marking::IsBlack(mark_bit)) { |
| 3083 Address free_start; | |
| 3084 if ((free_start = current->GetAddressToShrink()) != 0) { | |
| 3085 // TODO(hpayer): Perform partial free concurrently. | |
| 3086 heap()->memory_allocator()->PartialFreeMemory(current, free_start); | |
| 3087 } | |
| 3088 previous = current; | 3046 previous = current; |
| 3089 current = current->next_page(); | 3047 current = current->next_page(); |
| 3090 } else { | 3048 } else { |
| 3091 LargePage* page = current; | 3049 LargePage* page = current; |
| 3092 // Cut the chunk out from the chunk list. | 3050 // Cut the chunk out from the chunk list. |
| 3093 current = current->next_page(); | 3051 current = current->next_page(); |
| 3094 if (previous == NULL) { | 3052 if (previous == NULL) { |
| 3095 first_page_ = current; | 3053 first_page_ = current; |
| 3096 } else { | 3054 } else { |
| 3097 previous->set_next_page(current); | 3055 previous->set_next_page(current); |
| (...skipping 142 matching lines...) |
| 3240 object->ShortPrint(); | 3198 object->ShortPrint(); |
| 3241 PrintF("\n"); | 3199 PrintF("\n"); |
| 3242 } | 3200 } |
| 3243 printf(" --------------------------------------\n"); | 3201 printf(" --------------------------------------\n"); |
| 3244 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3202 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3245 } | 3203 } |
| 3246 | 3204 |
| 3247 #endif // DEBUG | 3205 #endif // DEBUG |
| 3248 } // namespace internal | 3206 } // namespace internal |
| 3249 } // namespace v8 | 3207 } // namespace v8 |