| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
| 10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
| (...skipping 552 matching lines...) |
| 563 CodeRange* code_range = heap_->memory_allocator()->code_range(); | 563 CodeRange* code_range = heap_->memory_allocator()->code_range(); |
| 564 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); | 564 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE)); |
| 565 if (!code_range->UncommitRawMemory(start, length)) return false; | 565 if (!code_range->UncommitRawMemory(start, length)) return false; |
| 566 } | 566 } |
| 567 } | 567 } |
| 568 | 568 |
| 569 area_end_ = area_start_ + requested; | 569 area_end_ = area_start_ + requested; |
| 570 return true; | 570 return true; |
| 571 } | 571 } |
| 572 | 572 |
| 573 size_t MemoryChunk::CommittedPhysicalMemory() { | |
| 574 if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE) | |
| 575 return size(); | |
| 576 return high_water_mark_.Value(); | |
| 577 } | |
| 578 | 573 |
| 579 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 574 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 580 MemoryChunk* other_next = other->next_chunk(); | 575 MemoryChunk* other_next = other->next_chunk(); |
| 581 | 576 |
| 582 set_next_chunk(other_next); | 577 set_next_chunk(other_next); |
| 583 set_prev_chunk(other); | 578 set_prev_chunk(other); |
| 584 other_next->set_prev_chunk(this); | 579 other_next->set_prev_chunk(this); |
| 585 other->set_next_chunk(this); | 580 other->set_next_chunk(this); |
| 586 } | 581 } |
| 587 | 582 |
| (...skipping 147 matching lines...) |
| 735 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 730 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
| 736 executable, owner, &reservation); | 731 executable, owner, &reservation); |
| 737 } | 732 } |
| 738 | 733 |
| 739 | 734 |
| 740 void Page::ResetFreeListStatistics() { | 735 void Page::ResetFreeListStatistics() { |
| 741 wasted_memory_ = 0; | 736 wasted_memory_ = 0; |
| 742 available_in_free_list_ = 0; | 737 available_in_free_list_ = 0; |
| 743 } | 738 } |
| 744 | 739 |
| 745 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, | |
| 746 Address start_free) { | |
| 747 // We do not allow partial shrink for code. | |
| 748 DCHECK(chunk->executable() == NOT_EXECUTABLE); | |
| 749 | |
| 750 intptr_t size; | |
| 751 base::VirtualMemory* reservation = chunk->reserved_memory(); | |
| 752 DCHECK(reservation->IsReserved()); | |
| 753 size = static_cast<intptr_t>(reservation->size()); | |
| 754 | |
| 755 size_t to_free_size = size - (start_free - chunk->address()); | |
| 756 | |
| 757 DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size)); | |
| 758 size_.Increment(-static_cast<intptr_t>(to_free_size)); | |
| 759 isolate_->counters()->memory_allocated()->Decrement( | |
| 760 static_cast<int>(to_free_size)); | |
| 761 chunk->set_size(size - to_free_size); | |
| 762 | |
| 763 reservation->ReleasePartial(start_free); | |
| 764 } | |
| 765 | |
| 766 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { | 740 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
| 767 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 741 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| 768 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 742 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 769 | 743 |
| 770 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 744 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| 771 chunk->IsEvacuationCandidate()); | 745 chunk->IsEvacuationCandidate()); |
| 772 | 746 |
| 773 intptr_t size; | 747 intptr_t size; |
| 774 base::VirtualMemory* reservation = chunk->reserved_memory(); | 748 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 775 if (reservation->IsReserved()) { | 749 if (reservation->IsReserved()) { |
| (...skipping 2151 matching lines...) |
| 2927 #endif | 2901 #endif |
| 2928 } | 2902 } |
| 2929 | 2903 |
| 2930 // ----------------------------------------------------------------------------- | 2904 // ----------------------------------------------------------------------------- |
| 2931 // MapSpace implementation | 2905 // MapSpace implementation |
| 2932 | 2906 |
| 2933 #ifdef VERIFY_HEAP | 2907 #ifdef VERIFY_HEAP |
| 2934 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } | 2908 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
| 2935 #endif | 2909 #endif |
| 2936 | 2910 |
| 2937 Address LargePage::GetAddressToShrink() { | |
| 2938 HeapObject* object = GetObject(); | |
| 2939 if (executable() == EXECUTABLE) { | |
| 2940 return 0; | |
| 2941 } | |
| 2942 size_t used_size = RoundUp((object->address() - address()) + object->Size(), | |
| 2943 base::OS::CommitPageSize()); | |
| 2944 if (used_size < CommittedPhysicalMemory()) { | |
| 2945 return address() + used_size; | |
| 2946 } | |
| 2947 return 0; | |
| 2948 } | |
| 2949 | |
| 2950 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) { | |
| 2951 if (old_to_new_slots() != nullptr) { | |
| 2952 old_to_new_slots()->RemoveRange( | |
| 2953 static_cast<int>(free_start - address()), | |
| 2954 static_cast<int>(free_start + size() - address())); | |
| 2955 } | |
| 2956 if (old_to_old_slots() != nullptr) { | |
| 2957 old_to_old_slots()->RemoveRange( | |
| 2958 static_cast<int>(free_start - address()), | |
| 2959 static_cast<int>(free_start + size() - address())); | |
| 2960 } | |
| 2961 } | |
| 2962 | 2911 |
| 2963 // ----------------------------------------------------------------------------- | 2912 // ----------------------------------------------------------------------------- |
| 2964 // LargeObjectIterator | 2913 // LargeObjectIterator |
| 2965 | 2914 |
| 2966 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 2915 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 2967 current_ = space->first_page_; | 2916 current_ = space->first_page_; |
| 2968 } | 2917 } |
| 2969 | 2918 |
| 2970 | 2919 |
| 2971 HeapObject* LargeObjectIterator::Next() { | 2920 HeapObject* LargeObjectIterator::Next() { |
| (...skipping 53 matching lines...) |
| 3025 if (page == NULL) return AllocationResult::Retry(identity()); | 2974 if (page == NULL) return AllocationResult::Retry(identity()); |
| 3026 DCHECK(page->area_size() >= object_size); | 2975 DCHECK(page->area_size() >= object_size); |
| 3027 | 2976 |
| 3028 size_ += static_cast<int>(page->size()); | 2977 size_ += static_cast<int>(page->size()); |
| 3029 AccountCommitted(static_cast<intptr_t>(page->size())); | 2978 AccountCommitted(static_cast<intptr_t>(page->size())); |
| 3030 objects_size_ += object_size; | 2979 objects_size_ += object_size; |
| 3031 page_count_++; | 2980 page_count_++; |
| 3032 page->set_next_page(first_page_); | 2981 page->set_next_page(first_page_); |
| 3033 first_page_ = page; | 2982 first_page_ = page; |
| 3034 | 2983 |
| 3035 InsertChunkMapEntries(page); | 2984 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| | 2985 // this large page in the chunk map. |
| | 2986 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
| | 2987 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
| | 2988 for (uintptr_t key = base; key <= limit; key++) { |
| | 2989 base::HashMap::Entry* entry = chunk_map_.LookupOrInsert( |
| | 2990 reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); |
| | 2991 DCHECK(entry != NULL); |
| | 2992 entry->value = page; |
| | 2993 } |
| 3036 | 2994 |
| 3037 HeapObject* object = page->GetObject(); | 2995 HeapObject* object = page->GetObject(); |
| 3038 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); | 2996 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); |
| 3039 | 2997 |
| 3040 if (Heap::ShouldZapGarbage()) { | 2998 if (Heap::ShouldZapGarbage()) { |
| 3041 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2999 // Make the object consistent so the heap can be verified in OldSpaceStep. |
| 3042 // We only need to do this in debug builds or if verify_heap is on. | 3000 // We only need to do this in debug builds or if verify_heap is on. |
| 3043 reinterpret_cast<Object**>(object->address())[0] = | 3001 reinterpret_cast<Object**>(object->address())[0] = |
| 3044 heap()->fixed_array_map(); | 3002 heap()->fixed_array_map(); |
| 3045 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 3003 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
| (...skipping 45 matching lines...) |
| 3091 HeapObject* object = current->GetObject(); | 3049 HeapObject* object = current->GetObject(); |
| 3092 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3050 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 3093 DCHECK(Marking::IsBlack(mark_bit)); | 3051 DCHECK(Marking::IsBlack(mark_bit)); |
| 3094 Marking::BlackToWhite(mark_bit); | 3052 Marking::BlackToWhite(mark_bit); |
| 3095 Page::FromAddress(object->address())->ResetProgressBar(); | 3053 Page::FromAddress(object->address())->ResetProgressBar(); |
| 3096 Page::FromAddress(object->address())->ResetLiveBytes(); | 3054 Page::FromAddress(object->address())->ResetLiveBytes(); |
| 3097 current = current->next_page(); | 3055 current = current->next_page(); |
| 3098 } | 3056 } |
| 3099 } | 3057 } |
| 3100 | 3058 |
| 3101 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) { | |
| 3102 // Register all MemoryChunk::kAlignment-aligned chunks covered by | |
| 3103 // this large page in the chunk map. | |
| 3104 uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | |
| 3105 uintptr_t limit = start + (page->size() - 1) / MemoryChunk::kAlignment; | |
| 3106 for (uintptr_t key = start; key <= limit; key++) { | |
| 3107 base::HashMap::Entry* entry = chunk_map_.InsertNew( | |
| 3108 reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); | |
| 3109 DCHECK(entry != NULL); | |
| 3110 entry->value = page; | |
| 3111 } | |
| 3112 } | |
| 3113 | |
| 3114 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) { | |
| 3115 RemoveChunkMapEntries(page, page->address()); | |
| 3116 } | |
| 3117 | |
| 3118 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page, | |
| 3119 Address free_start) { | |
| 3120 uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start), | |
| 3121 MemoryChunk::kAlignment) / | |
| 3122 MemoryChunk::kAlignment; | |
| 3123 uintptr_t limit = start + (page->size() - 1) / MemoryChunk::kAlignment; | |
| 3124 for (uintptr_t key = start; key <= limit; key++) { | |
| 3125 chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); | |
| 3126 } | |
| 3127 } | |
| 3128 | 3059 |
| 3129 void LargeObjectSpace::FreeUnmarkedObjects() { | 3060 void LargeObjectSpace::FreeUnmarkedObjects() { |
| 3130 LargePage* previous = NULL; | 3061 LargePage* previous = NULL; |
| 3131 LargePage* current = first_page_; | 3062 LargePage* current = first_page_; |
| 3132 while (current != NULL) { | 3063 while (current != NULL) { |
| 3133 HeapObject* object = current->GetObject(); | 3064 HeapObject* object = current->GetObject(); |
| 3134 MarkBit mark_bit = Marking::MarkBitFrom(object); | 3065 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 3135 DCHECK(!Marking::IsGrey(mark_bit)); | 3066 DCHECK(!Marking::IsGrey(mark_bit)); |
| 3136 if (Marking::IsBlack(mark_bit)) { | 3067 if (Marking::IsBlack(mark_bit)) { |
| 3137 Address free_start; | |
| 3138 if ((free_start = current->GetAddressToShrink()) != 0) { | |
| 3139 // TODO(hpayer): Perform partial free concurrently. | |
| 3140 current->ClearOutOfLiveRangeSlots(free_start); | |
| 3141 RemoveChunkMapEntries(current, free_start); | |
| 3142 heap()->memory_allocator()->PartialFreeMemory(current, free_start); | |
| 3143 } | |
| 3144 previous = current; | 3068 previous = current; |
| 3145 current = current->next_page(); | 3069 current = current->next_page(); |
| 3146 } else { | 3070 } else { |
| 3147 LargePage* page = current; | 3071 LargePage* page = current; |
| 3148 // Cut the chunk out from the chunk list. | 3072 // Cut the chunk out from the chunk list. |
| 3149 current = current->next_page(); | 3073 current = current->next_page(); |
| 3150 if (previous == NULL) { | 3074 if (previous == NULL) { |
| 3151 first_page_ = current; | 3075 first_page_ = current; |
| 3152 } else { | 3076 } else { |
| 3153 previous->set_next_page(current); | 3077 previous->set_next_page(current); |
| 3154 } | 3078 } |
| 3155 | 3079 |
| 3156 // Free the chunk. | 3080 // Free the chunk. |
| 3157 size_ -= static_cast<int>(page->size()); | 3081 size_ -= static_cast<int>(page->size()); |
| 3158 AccountUncommitted(static_cast<intptr_t>(page->size())); | 3082 AccountUncommitted(static_cast<intptr_t>(page->size())); |
| 3159 objects_size_ -= object->Size(); | 3083 objects_size_ -= object->Size(); |
| 3160 page_count_--; | 3084 page_count_--; |
| 3161 | 3085 |
| 3162 RemoveChunkMapEntries(page); | 3086 // Remove entries belonging to this page. |
| | 3087 // Use variable alignment to help pass length check (<= 80 characters) |
| | 3088 // of single line in tools/presubmit.py. |
| | 3089 const intptr_t alignment = MemoryChunk::kAlignment; |
| | 3090 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment; |
| | 3091 uintptr_t limit = base + (page->size() - 1) / alignment; |
| | 3092 for (uintptr_t key = base; key <= limit; key++) { |
| | 3093 chunk_map_.Remove(reinterpret_cast<void*>(key), |
| | 3094 static_cast<uint32_t>(key)); |
| | 3095 } |
| | 3096 |
| 3163 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 3097 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
| 3164 } | 3098 } |
| 3165 } | 3099 } |
| 3166 } | 3100 } |
| 3167 | 3101 |
| 3168 | 3102 |
| 3169 bool LargeObjectSpace::Contains(HeapObject* object) { | 3103 bool LargeObjectSpace::Contains(HeapObject* object) { |
| 3170 Address address = object->address(); | 3104 Address address = object->address(); |
| 3171 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 3105 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 3172 | 3106 |
| (...skipping 113 matching lines...) |
| 3286 object->ShortPrint(); | 3220 object->ShortPrint(); |
| 3287 PrintF("\n"); | 3221 PrintF("\n"); |
| 3288 } | 3222 } |
| 3289 printf(" --------------------------------------\n"); | 3223 printf(" --------------------------------------\n"); |
| 3290 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3291 } | 3225 } |
| 3292 | 3226 |
| 3293 #endif // DEBUG | 3227 #endif // DEBUG |
| 3294 } // namespace internal | 3228 } // namespace internal |
| 3295 } // namespace v8 | 3229 } // namespace v8 |