OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 552 matching lines...)
563 CodeRange* code_range = heap_->memory_allocator()->code_range(); | 563 CodeRange* code_range = heap_->memory_allocator()->code_range(); |
564 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); | 564 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); |
565 if (!code_range->UncommitRawMemory(start, length)) return false; | 565 if (!code_range->UncommitRawMemory(start, length)) return false; |
566 } | 566 } |
567 } | 567 } |
568 | 568 |
569 area_end_ = area_start_ + requested; | 569 area_end_ = area_start_ + requested; |
570 return true; | 570 return true; |
571 } | 571 } |
572 | 572 |
| 573 size_t MemoryChunk::CommittedPhysicalMemory() { |
| 574 if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE) |
| 575 return size(); |
| 576 return high_water_mark_.Value(); |
| 577 } |
573 | 578 |
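
The new CommittedPhysicalMemory() reports the whole chunk size when the platform has no lazy commits (or for large-object pages), and otherwise uses the allocation high-water mark as a cheap approximation of how much of the chunk is physically backed. A minimal sketch of that idea, with a hypothetical Chunk type standing in for MemoryChunk (field names are illustrative, not V8 API):

    #include <cstddef>

    struct Chunk {
      size_t size;             // total reserved size of the chunk
      size_t high_water_mark;  // highest allocation offset seen so far
      bool is_large_object_page;

      // Sketch of the approximation above: without lazy commits every
      // reserved page is backed immediately, so the whole reservation
      // counts; with lazy commits, pages are only backed once touched,
      // and the high-water mark bounds how far allocation has touched.
      size_t CommittedPhysicalMemory(bool has_lazy_commits) const {
        if (!has_lazy_commits || is_large_object_page) return size;
        return high_water_mark;
      }
    };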
574 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 579 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
575 MemoryChunk* other_next = other->next_chunk(); | 580 MemoryChunk* other_next = other->next_chunk(); |
576 | 581 |
577 set_next_chunk(other_next); | 582 set_next_chunk(other_next); |
578 set_prev_chunk(other); | 583 set_prev_chunk(other); |
579 other_next->set_prev_chunk(this); | 584 other_next->set_prev_chunk(this); |
580 other->set_next_chunk(this); | 585 other->set_next_chunk(this); |
581 } | 586 } |
582 | 587 |
(...skipping 147 matching lines...)
730 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 735 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
731 executable, owner, &reservation); | 736 executable, owner, &reservation); |
732 } | 737 } |
733 | 738 |
734 | 739 |
735 void Page::ResetFreeListStatistics() { | 740 void Page::ResetFreeListStatistics() { |
736 wasted_memory_ = 0; | 741 wasted_memory_ = 0; |
737 available_in_free_list_ = 0; | 742 available_in_free_list_ = 0; |
738 } | 743 } |
739 | 744 |
| 745 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, |
| 746 Address start_free) { |
 | 747 // We do not allow partial shrinking of code pages. |
| 748 DCHECK(chunk->executable() == NOT_EXECUTABLE); |
| 749 |
| 750 intptr_t size; |
| 751 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 752 DCHECK(reservation->IsReserved()); |
| 753 size = static_cast<intptr_t>(reservation->size()); |
| 754 |
| 755 size_t to_free_size = size - (start_free - chunk->address()); |
| 756 |
| 757 DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size)); |
| 758 size_.Increment(-static_cast<intptr_t>(to_free_size)); |
| 759 isolate_->counters()->memory_allocated()->Decrement( |
| 760 static_cast<int>(to_free_size)); |
| 761 chunk->set_size(size - to_free_size); |
| 762 |
| 763 reservation->ReleasePartial(start_free); |
| 764 } |
| 765 |
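
PartialFreeMemory() releases the tail of a chunk's reservation starting at start_free and keeps the allocator's byte counter and the isolate's memory_allocated counter in sync. The size arithmetic is easy to verify by hand; a worked example with made-up numbers (the constants are illustrative, not from this CL):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t chunk_address = 0x100000;  // hypothetical chunk base
      const uintptr_t start_free = 0x120000;     // first byte to release
      const size_t size = 512 * 1024;            // reservation size: 512 KB

      // Everything from start_free to the end of the reservation goes away.
      const size_t to_free_size = size - (start_free - chunk_address);
      assert(to_free_size == 384 * 1024);  // 384 KB released

      // The chunk keeps only the live prefix.
      const size_t new_size = size - to_free_size;
      assert(new_size == 128 * 1024);  // 128 KB retained
      return 0;
    }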
740 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { | 766 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
741 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 767 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
742 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 768 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
743 | 769 |
744 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 770 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
745 chunk->IsEvacuationCandidate()); | 771 chunk->IsEvacuationCandidate()); |
746 | 772 |
747 intptr_t size; | 773 intptr_t size; |
748 base::VirtualMemory* reservation = chunk->reserved_memory(); | 774 base::VirtualMemory* reservation = chunk->reserved_memory(); |
749 if (reservation->IsReserved()) { | 775 if (reservation->IsReserved()) { |
(...skipping 2151 matching lines...)
2901 #endif | 2927 #endif |
2902 } | 2928 } |
2903 | 2929 |
2904 // ----------------------------------------------------------------------------- | 2930 // ----------------------------------------------------------------------------- |
2905 // MapSpace implementation | 2931 // MapSpace implementation |
2906 | 2932 |
2907 #ifdef VERIFY_HEAP | 2933 #ifdef VERIFY_HEAP |
2908 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } | 2934 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
2909 #endif | 2935 #endif |
2910 | 2936 |
| 2937 Address LargePage::GetAddressToShrink() { |
| 2938 HeapObject* object = GetObject(); |
| 2939 if (executable() == EXECUTABLE) { |
| 2940 return 0; |
| 2941 } |
| 2942 size_t used_size = RoundUp((object->address() - address()) + object->Size(), |
| 2943 base::OS::CommitPageSize()); |
| 2944 if (used_size < CommittedPhysicalMemory()) { |
| 2945 return address() + used_size; |
| 2946 } |
| 2947 return 0; |
| 2948 } |
| 2949 |
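
GetAddressToShrink() computes how much of a large page is actually used: the distance from the page base to the end of its single object, rounded up to the OS commit-page granularity. If that is less than what is currently committed, the address past the used prefix is the shrink target; executable pages are never shrunk. A small sketch of the rounding step (RoundUp here is a local helper assuming a power-of-two page size, not the V8 utility):

    #include <cstddef>
    #include <cstdint>

    // Round x up to a multiple of a power-of-two alignment.
    static uintptr_t RoundUp(uintptr_t x, uintptr_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    // Example: the object starts 0x80 bytes into the page and is 0x2345
    // bytes long; with 4 KB commit pages the used prefix rounds up to
    // 0x3000 bytes, so everything from page_base + 0x3000 on is releasable.
    uintptr_t UsedPrefixBytes() {
      const uintptr_t object_offset = 0x80;  // object->address() - address()
      const size_t object_size = 0x2345;     // object->Size()
      return RoundUp(object_offset + object_size, 4096);  // == 0x3000
    }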
| 2950 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) { |
| 2951 if (old_to_new_slots() != nullptr) { |
| 2952 old_to_new_slots()->RemoveRange( |
| 2953 static_cast<int>(free_start - address()), |
| 2954 static_cast<int>(free_start + size() - address())); |
| 2955 } |
| 2956 if (old_to_old_slots() != nullptr) { |
| 2957 old_to_old_slots()->RemoveRange( |
| 2958 static_cast<int>(free_start - address()), |
| 2959 static_cast<int>(free_start + size() - address())); |
| 2960 } |
| 2961 } |
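
ClearOutOfLiveRangeSlots() runs before the tail is released: it drops remembered-set entries (old-to-new and old-to-old) whose slots fall past free_start, so no later pointer update walks into unmapped memory. A toy bitmap version of the remove-range idea (this is not V8's SlotSet, just the concept in miniature):

    #include <cstddef>
    #include <vector>

    // Toy remembered set: one bit per recorded slot offset in a page.
    class ToySlotSet {
     public:
      explicit ToySlotSet(size_t slots) : bits_(slots, false) {}
      void Insert(size_t slot) { bits_[slot] = true; }
      bool Lookup(size_t slot) const { return bits_[slot]; }
      // Clear every recorded slot in [start, end), clamped to the set size.
      void RemoveRange(size_t start, size_t end) {
        for (size_t i = start; i < end && i < bits_.size(); i++) {
          bits_[i] = false;
        }
      }

     private:
      std::vector<bool> bits_;
    };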
2911 | 2962 |
2912 // ----------------------------------------------------------------------------- | 2963 // ----------------------------------------------------------------------------- |
2913 // LargeObjectIterator | 2964 // LargeObjectIterator |
2914 | 2965 |
2915 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 2966 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
2916 current_ = space->first_page_; | 2967 current_ = space->first_page_; |
2917 } | 2968 } |
2918 | 2969 |
2919 | 2970 |
2920 HeapObject* LargeObjectIterator::Next() { | 2971 HeapObject* LargeObjectIterator::Next() { |
(...skipping 53 matching lines...)
2974 if (page == NULL) return AllocationResult::Retry(identity()); | 3025 if (page == NULL) return AllocationResult::Retry(identity()); |
2975 DCHECK(page->area_size() >= object_size); | 3026 DCHECK(page->area_size() >= object_size); |
2976 | 3027 |
2977 size_ += static_cast<int>(page->size()); | 3028 size_ += static_cast<int>(page->size()); |
2978 AccountCommitted(static_cast<intptr_t>(page->size())); | 3029 AccountCommitted(static_cast<intptr_t>(page->size())); |
2979 objects_size_ += object_size; | 3030 objects_size_ += object_size; |
2980 page_count_++; | 3031 page_count_++; |
2981 page->set_next_page(first_page_); | 3032 page->set_next_page(first_page_); |
2982 first_page_ = page; | 3033 first_page_ = page; |
2983 | 3034 |
2984 // Register all MemoryChunk::kAlignment-aligned chunks covered by | 3035 InsertChunkMapEntries(page); |
2985 // this large page in the chunk map. | |
2986 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | |
2987 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | |
2988 for (uintptr_t key = base; key <= limit; key++) { | |
2989 base::HashMap::Entry* entry = chunk_map_.LookupOrInsert( | |
2990 reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); | |
2991 DCHECK(entry != NULL); | |
2992 entry->value = page; | |
2993 } | |
2994 | 3036 |
2995 HeapObject* object = page->GetObject(); | 3037 HeapObject* object = page->GetObject(); |
2996 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); | 3038 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); |
2997 | 3039 |
2998 if (Heap::ShouldZapGarbage()) { | 3040 if (Heap::ShouldZapGarbage()) { |
2999 // Make the object consistent so the heap can be verified in OldSpaceStep. | 3041 // Make the object consistent so the heap can be verified in OldSpaceStep. |
3000 // We only need to do this in debug builds or if verify_heap is on. | 3042 // We only need to do this in debug builds or if verify_heap is on. |
3001 reinterpret_cast<Object**>(object->address())[0] = | 3043 reinterpret_cast<Object**>(object->address())[0] = |
3002 heap()->fixed_array_map(); | 3044 heap()->fixed_array_map(); |
3003 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 3045 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
(...skipping 45 matching lines...)
3049 HeapObject* object = current->GetObject(); | 3091 HeapObject* object = current->GetObject(); |
3050 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3092 MarkBit mark_bit = Marking::MarkBitFrom(object); |
3051 DCHECK(Marking::IsBlack(mark_bit)); | 3093 DCHECK(Marking::IsBlack(mark_bit)); |
3052 Marking::BlackToWhite(mark_bit); | 3094 Marking::BlackToWhite(mark_bit); |
3053 Page::FromAddress(object->address())->ResetProgressBar(); | 3095 Page::FromAddress(object->address())->ResetProgressBar(); |
3054 Page::FromAddress(object->address())->ResetLiveBytes(); | 3096 Page::FromAddress(object->address())->ResetLiveBytes(); |
3055 current = current->next_page(); | 3097 current = current->next_page(); |
3056 } | 3098 } |
3057 } | 3099 } |
3058 | 3100 |
| 3101 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) { |
| 3102 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| 3103 // this large page in the chunk map. |
| 3104 uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
| 3105 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) / |
| 3106 MemoryChunk::kAlignment; |
| 3107 for (uintptr_t key = start; key <= limit; key++) { |
| 3108 base::HashMap::Entry* entry = chunk_map_.InsertNew( |
| 3109 reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); |
| 3110 DCHECK(entry != NULL); |
| 3111 entry->value = page; |
| 3112 } |
| 3113 } |
| 3114 |
| 3115 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) { |
| 3116 RemoveChunkMapEntries(page, page->address()); |
| 3117 } |
| 3118 |
| 3119 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page, |
| 3120 Address free_start) { |
| 3121 uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start), |
| 3122 MemoryChunk::kAlignment) / |
| 3123 MemoryChunk::kAlignment; |
| 3124 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) / |
| 3125 MemoryChunk::kAlignment; |
| 3126 for (uintptr_t key = start; key <= limit; key++) { |
| 3127 chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); |
| 3128 } |
| 3129 } |
3059 | 3130 |
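
The two new helpers factor out the chunk-map bookkeeping that AllocateRaw and FreeUnmarkedObjects previously did inline: every MemoryChunk::kAlignment-sized address window covered by a large page maps back to that page, which is what makes interior-pointer lookups work. The two-argument RemoveChunkMapEntries rounds free_start up so the window that still contains live bytes keeps its entry. A sketch of the keying scheme (std::map and the concrete alignment are stand-ins for V8's HashMap and kAlignment):

    #include <cstdint>
    #include <map>

    constexpr uintptr_t kAlignment = 512 * 1024;  // illustrative value

    // One entry per aligned window covered by [base, base + size).
    void InsertEntries(std::map<uintptr_t, void*>& chunk_map,
                       uintptr_t base, size_t size, void* page) {
      const uintptr_t start = base / kAlignment;
      const uintptr_t limit = (base + size - 1) / kAlignment;
      for (uintptr_t key = start; key <= limit; key++) chunk_map[key] = page;
    }

    // After shrinking to free_start, drop only fully-freed windows;
    // rounding free_start up preserves the window still holding live bytes.
    void RemoveTail(std::map<uintptr_t, void*>& chunk_map,
                    uintptr_t base, size_t size, uintptr_t free_start) {
      const uintptr_t start = (free_start + kAlignment - 1) / kAlignment;
      const uintptr_t limit = (base + size - 1) / kAlignment;
      for (uintptr_t key = start; key <= limit; key++) chunk_map.erase(key);
    }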
3060 void LargeObjectSpace::FreeUnmarkedObjects() { | 3131 void LargeObjectSpace::FreeUnmarkedObjects() { |
3061 LargePage* previous = NULL; | 3132 LargePage* previous = NULL; |
3062 LargePage* current = first_page_; | 3133 LargePage* current = first_page_; |
3063 while (current != NULL) { | 3134 while (current != NULL) { |
3064 HeapObject* object = current->GetObject(); | 3135 HeapObject* object = current->GetObject(); |
3065 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3136 MarkBit mark_bit = Marking::MarkBitFrom(object); |
3066 DCHECK(!Marking::IsGrey(mark_bit)); | 3137 DCHECK(!Marking::IsGrey(mark_bit)); |
3067 if (Marking::IsBlack(mark_bit)) { | 3138 if (Marking::IsBlack(mark_bit)) { |
| 3139 Address free_start; |
| 3140 if ((free_start = current->GetAddressToShrink()) != 0) { |
| 3141 // TODO(hpayer): Perform partial free concurrently. |
| 3142 current->ClearOutOfLiveRangeSlots(free_start); |
| 3143 RemoveChunkMapEntries(current, free_start); |
| 3144 heap()->memory_allocator()->PartialFreeMemory(current, free_start); |
| 3145 } |
3068 previous = current; | 3146 previous = current; |
3069 current = current->next_page(); | 3147 current = current->next_page(); |
3070 } else { | 3148 } else { |
3071 LargePage* page = current; | 3149 LargePage* page = current; |
3072 // Cut the chunk out from the chunk list. | 3150 // Cut the chunk out from the chunk list. |
3073 current = current->next_page(); | 3151 current = current->next_page(); |
3074 if (previous == NULL) { | 3152 if (previous == NULL) { |
3075 first_page_ = current; | 3153 first_page_ = current; |
3076 } else { | 3154 } else { |
3077 previous->set_next_page(current); | 3155 previous->set_next_page(current); |
3078 } | 3156 } |
3079 | 3157 |
3080 // Free the chunk. | 3158 // Free the chunk. |
3081 size_ -= static_cast<int>(page->size()); | 3159 size_ -= static_cast<int>(page->size()); |
3082 AccountUncommitted(static_cast<intptr_t>(page->size())); | 3160 AccountUncommitted(static_cast<intptr_t>(page->size())); |
3083 objects_size_ -= object->Size(); | 3161 objects_size_ -= object->Size(); |
3084 page_count_--; | 3162 page_count_--; |
3085 | 3163 |
3086 // Remove entries belonging to this page. | 3164 RemoveChunkMapEntries(page); |
3087 // Use variable alignment to help pass length check (<= 80 characters) | |
3088 // of single line in tools/presubmit.py. | |
3089 const intptr_t alignment = MemoryChunk::kAlignment; | |
3090 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment; | |
3091 uintptr_t limit = base + (page->size() - 1) / alignment; | |
3092 for (uintptr_t key = base; key <= limit; key++) { | |
3093 chunk_map_.Remove(reinterpret_cast<void*>(key), | |
3094 static_cast<uint32_t>(key)); | |
3095 } | |
3096 | |
3097 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 3165 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
3098 } | 3166 } |
3099 } | 3167 } |
3100 } | 3168 } |
3101 | 3169 |
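
Note the ordering in the new shrink path above: out-of-range remembered-set slots are cleared and the tail's chunk-map entries removed before the memory itself is handed back in PartialFreeMemory, so no lookup structure ever points into released pages. Per the TODO, the partial free still happens synchronously during sweeping; making it concurrent is left for a follow-up.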
3102 | 3170 |
3103 bool LargeObjectSpace::Contains(HeapObject* object) { | 3171 bool LargeObjectSpace::Contains(HeapObject* object) { |
3104 Address address = object->address(); | 3172 Address address = object->address(); |
3105 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 3173 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
3106 | 3174 |
(...skipping 113 matching lines...)
3220 object->ShortPrint(); | 3288 object->ShortPrint(); |
3221 PrintF("\n"); | 3289 PrintF("\n"); |
3222 } | 3290 } |
3223 printf(" --------------------------------------\n"); | 3291 printf(" --------------------------------------\n"); |
3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3292 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3225 } | 3293 } |
3226 | 3294 |
3227 #endif // DEBUG | 3295 #endif // DEBUG |
3228 } // namespace internal | 3296 } // namespace internal |
3229 } // namespace v8 | 3297 } // namespace v8 |