OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
10 #include "src/heap/slots-buffer.h" | 10 #include "src/heap/slots-buffer.h" |
(...skipping 906 matching lines...)
917 return false; | 917 return false; |
918 } | 918 } |
919 | 919 |
920 | 920 |
921 // ----------------------------------------------------------------------------- | 921 // ----------------------------------------------------------------------------- |
922 // MemoryChunk implementation | 922 // MemoryChunk implementation |
923 | 923 |
924 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) { | 924 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) { |
925 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); | 925 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); |
926 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { | 926 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { |
927 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); | 927 static_cast<PagedSpace*>(chunk->owner())->Allocate(by); |
928 } | 928 } |
929 chunk->IncrementLiveBytes(by); | 929 chunk->IncrementLiveBytes(by); |
930 } | 930 } |
931 | 931 |
932 | 932 |
933 void MemoryChunk::ReleaseAllocatedMemory() { | 933 void MemoryChunk::ReleaseAllocatedMemory() { |
934 delete slots_buffer_; | 934 delete slots_buffer_; |
935 delete skip_list_; | 935 delete skip_list_; |
936 delete mutex_; | 936 delete mutex_; |
937 } | 937 } |
938 | 938 |
939 | 939 |
940 // ----------------------------------------------------------------------------- | 940 // ----------------------------------------------------------------------------- |
941 // PagedSpace implementation | 941 // PagedSpace implementation |
942 | 942 |
943 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) == | 943 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) == |
944 ObjectSpace::kObjectSpaceNewSpace); | 944 ObjectSpace::kObjectSpaceNewSpace); |
945 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) == | 945 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) == |
946 ObjectSpace::kObjectSpaceOldSpace); | 946 ObjectSpace::kObjectSpaceOldSpace); |
947 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) == | 947 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) == |
948 ObjectSpace::kObjectSpaceCodeSpace); | 948 ObjectSpace::kObjectSpaceCodeSpace); |
949 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == | 949 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == |
950 ObjectSpace::kObjectSpaceMapSpace); | 950 ObjectSpace::kObjectSpaceMapSpace); |
951 | 951 |
952 | 952 |
953 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, | 953 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, |
954 Executability executable) | 954 Executability executable) |
955 : Space(heap, space, executable), | 955 : Space(heap, space, executable), |
956 free_list_(this), | 956 free_list_(this), |
957 unswept_free_bytes_(0), | |
958 end_of_unswept_pages_(NULL) { | 957 end_of_unswept_pages_(NULL) { |
959 area_size_ = MemoryAllocator::PageAreaSize(space); | 958 area_size_ = MemoryAllocator::PageAreaSize(space); |
960 accounting_stats_.Clear(); | 959 accounting_stats_.Clear(); |
961 | 960 |
962 allocation_info_.set_top(NULL); | 961 allocation_info_.set_top(NULL); |
963 allocation_info_.set_limit(NULL); | 962 allocation_info_.set_limit(NULL); |
964 | 963 |
965 anchor_.InitializeAsAnchor(this); | 964 anchor_.InitializeAsAnchor(this); |
966 } | 965 } |
967 | 966 |
(...skipping 17 matching lines...)
985 | 984 |
986 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) { | 985 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) { |
987 DCHECK(identity() == other->identity()); | 986 DCHECK(identity() == other->identity()); |
988 // Destroy the linear allocation space of {other}. This is needed to | 987 // Destroy the linear allocation space of {other}. This is needed to |
989 // (a) not waste the memory and | 988 // (a) not waste the memory and |
990 // (b) keep the rest of the chunk in an iterable state (filler is needed). | 989 // (b) keep the rest of the chunk in an iterable state (filler is needed). |
991 other->EmptyAllocationInfo(); | 990 other->EmptyAllocationInfo(); |
992 | 991 |
993 // Move over the free list. Concatenate makes sure that the source free list | 992 // Move over the free list. Concatenate makes sure that the source free list |
994 // gets properly reset after moving over all nodes. | 993 // gets properly reset after moving over all nodes. |
995 intptr_t freed_bytes = free_list_.Concatenate(other->free_list()); | 994 intptr_t added = free_list_.Concatenate(other->free_list()); |
996 | 995 |
997 // Moved memory is not recorded as allocated memory, but rather increases and | 996 // Moved memory is not recorded as allocated memory, but rather increases and |
998 // decreases capacity of the corresponding spaces. Used size and waste size | 997 // decreases capacity of the corresponding spaces. Used size and waste size |
999 // are maintained by the receiving space upon allocating and freeing blocks. | 998 // are maintained by the receiving space upon allocating and freeing blocks. |
1000 other->accounting_stats_.DecreaseCapacity(freed_bytes); | 999 other->accounting_stats_.DecreaseCapacity(added); |
1001 accounting_stats_.IncreaseCapacity(freed_bytes); | 1000 accounting_stats_.IncreaseCapacity(added); |
1002 } | 1001 } |
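
The comments above treat moving free-list memory as a pure capacity transfer: the receiving space does not book it as an allocation, it simply grows its capacity by whatever the source gives up. A minimal sketch of that bookkeeping, using an illustrative SpaceStats type (not V8's AllocationStats) and made-up numbers:

    #include <cassert>
    #include <cstdint>

    struct SpaceStats {
      int64_t capacity;  // committed bytes accounted to the space
      int64_t size;      // bytes currently held by objects
      int64_t Available() const { return capacity - size; }
    };

    // Mirrors DecreaseCapacity(added) / IncreaseCapacity(added) above: the
    // moved bytes leave one space's capacity and enter the other's.
    void TransferFreeMemory(SpaceStats* src, SpaceStats* dst, int64_t moved) {
      src->capacity -= moved;
      dst->capacity += moved;
    }

    int main() {
      SpaceStats compaction{1 << 20, 1 << 19};  // 1 MB capacity, 512 KB used
      SpaceStats old_space{8 << 20, 6 << 20};
      int64_t before = compaction.Available() + old_space.Available();
      TransferFreeMemory(&compaction, &old_space, compaction.Available());
      // The total amount of available memory is unchanged by the move.
      assert(before == compaction.Available() + old_space.Available());
      return 0;
    }

Under this reading, every byte the source free list gives up, including its transferred wasted bytes, has to show up in the amount returned to the caller, which appears to be why Concatenate now reports usable plus wasted bytes.
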
1003 | 1002 |
1004 | 1003 |
1005 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { | 1004 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { |
1006 // Unmerged fields: | 1005 // Unmerged fields: |
1007 // area_size_ | 1006 // area_size_ |
1008 // allocation_info_ | 1007 // allocation_info_ |
1009 // end_of_unswept_pages_ | 1008 // end_of_unswept_pages_ |
1010 // unswept_free_bytes_ | 1009 // unswept_free_bytes_ |
1011 // anchor_ | 1010 // anchor_ |
(...skipping 120 matching lines...)
1132 | 1131 |
1133 void PagedSpace::ReleasePage(Page* page) { | 1132 void PagedSpace::ReleasePage(Page* page) { |
1134 DCHECK(page->LiveBytes() == 0); | 1133 DCHECK(page->LiveBytes() == 0); |
1135 DCHECK(AreaSize() == page->area_size()); | 1134 DCHECK(AreaSize() == page->area_size()); |
1136 | 1135 |
1137 if (page->WasSwept()) { | 1136 if (page->WasSwept()) { |
1138 intptr_t size = free_list_.EvictFreeListItems(page); | 1137 intptr_t size = free_list_.EvictFreeListItems(page); |
1139 accounting_stats_.AllocateBytes(size); | 1138 accounting_stats_.AllocateBytes(size); |
1140 DCHECK_EQ(AreaSize(), static_cast<int>(size)); | 1139 DCHECK_EQ(AreaSize(), static_cast<int>(size)); |
1141 } else { | 1140 } else { |
1142 DecreaseUnsweptFreeBytes(page); | 1141 accounting_stats_.DeallocateBytes(page->area_size()); |
1143 } | 1142 } |
1144 | 1143 |
1145 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { | 1144 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { |
1146 heap()->decrement_scan_on_scavenge_pages(); | 1145 heap()->decrement_scan_on_scavenge_pages(); |
1147 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); | 1146 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); |
1148 } | 1147 } |
1149 | 1148 |
1150 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1149 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1151 | 1150 |
1152 if (Page::FromAllocationTop(allocation_info_.top()) == page) { | 1151 if (Page::FromAllocationTop(allocation_info_.top()) == page) { |
(...skipping 1085 matching lines...)
2238 wasted_bytes_(0), | 2237 wasted_bytes_(0), |
2239 small_list_(this, kSmall), | 2238 small_list_(this, kSmall), |
2240 medium_list_(this, kMedium), | 2239 medium_list_(this, kMedium), |
2241 large_list_(this, kLarge), | 2240 large_list_(this, kLarge), |
2242 huge_list_(this, kHuge) { | 2241 huge_list_(this, kHuge) { |
2243 Reset(); | 2242 Reset(); |
2244 } | 2243 } |
2245 | 2244 |
2246 | 2245 |
2247 intptr_t FreeList::Concatenate(FreeList* other) { | 2246 intptr_t FreeList::Concatenate(FreeList* other) { |
2248 intptr_t free_bytes = 0; | 2247 intptr_t usable_bytes = 0; |
| 2248 intptr_t wasted_bytes = 0; |
2249 | 2249 |
2250 // This is safe (not going to deadlock) since Concatenate operations | 2250 // This is safe (not going to deadlock) since Concatenate operations |
2251 // are never performed on the same free lists at the same time in | 2251 // are never performed on the same free lists at the same time in |
2252 // reverse order. Furthermore, we only lock if the PagedSpace containing | 2252 // reverse order. Furthermore, we only lock if the PagedSpace containing |
2253 // the free list is known to be globally available, i.e., not local. | 2253 // the free list is known to be globally available, i.e., not local. |
2254 if (!owner()->is_local()) mutex_.Lock(); | 2254 if (!owner()->is_local()) mutex_.Lock(); |
2255 if (!other->owner()->is_local()) other->mutex()->Lock(); | 2255 if (!other->owner()->is_local()) other->mutex()->Lock(); |
2256 | 2256 |
2257 wasted_bytes_ += other->wasted_bytes_; | 2257 wasted_bytes = other->wasted_bytes_; |
| 2258 wasted_bytes_ += wasted_bytes; |
2258 other->wasted_bytes_ = 0; | 2259 other->wasted_bytes_ = 0; |
2259 | 2260 |
2260 free_bytes += small_list_.Concatenate(other->small_list()); | 2261 usable_bytes += small_list_.Concatenate(other->small_list()); |
2261 free_bytes += medium_list_.Concatenate(other->medium_list()); | 2262 usable_bytes += medium_list_.Concatenate(other->medium_list()); |
2262 free_bytes += large_list_.Concatenate(other->large_list()); | 2263 usable_bytes += large_list_.Concatenate(other->large_list()); |
2263 free_bytes += huge_list_.Concatenate(other->huge_list()); | 2264 usable_bytes += huge_list_.Concatenate(other->huge_list()); |
2264 | 2265 |
2265 if (!other->owner()->is_local()) other->mutex()->Unlock(); | 2266 if (!other->owner()->is_local()) other->mutex()->Unlock(); |
2266 if (!owner()->is_local()) mutex_.Unlock(); | 2267 if (!owner()->is_local()) mutex_.Unlock(); |
2267 return free_bytes; | 2268 return usable_bytes + wasted_bytes; |
2268 } | 2269 } |
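
The locking comment above relies on an ordering invariant: two threads never concatenate the same pair of free lists in opposite directions, so the two locks are never taken in reverse order, and thread-local (compaction space) lists skip locking entirely. A simplified, compilable sketch of that pattern with an illustrative ToyFreeList type (not V8's FreeList):

    #include <cstdint>
    #include <mutex>

    struct ToyFreeList {
      bool is_local = false;     // compaction-space lists are thread-private
      std::mutex mutex;
      int64_t usable_bytes = 0;  // bytes sitting in free-list nodes
      int64_t wasted_bytes = 0;  // fragments too small to link into the list

      // Same shape as the function above: lock only globally visible lists,
      // move both usable and wasted bytes, and report everything that moved.
      int64_t Concatenate(ToyFreeList* other) {
        if (!is_local) mutex.lock();
        if (!other->is_local) other->mutex.lock();

        int64_t moved = other->usable_bytes + other->wasted_bytes;
        usable_bytes += other->usable_bytes;
        wasted_bytes += other->wasted_bytes;
        other->usable_bytes = 0;
        other->wasted_bytes = 0;

        if (!other->is_local) other->mutex.unlock();
        if (!is_local) mutex.unlock();
        return moved;
      }
    };

    int main() {
      ToyFreeList owner, other;
      other.is_local = true;  // e.g. a compaction space's local list
      other.usable_bytes = 4096;
      other.wasted_bytes = 64;
      return owner.Concatenate(&other) == 4160 ? 0 : 1;
    }

If the same two lists could be concatenated in both directions at once, the two mutexes would be acquired in opposite orders and could deadlock; the invariant stated in the comment is what rules that out.
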
2269 | 2270 |
2270 | 2271 |
2271 void FreeList::Reset() { | 2272 void FreeList::Reset() { |
2272 small_list_.Reset(); | 2273 small_list_.Reset(); |
2273 medium_list_.Reset(); | 2274 medium_list_.Reset(); |
2274 large_list_.Reset(); | 2275 large_list_.Reset(); |
2275 huge_list_.Reset(); | 2276 huge_list_.Reset(); |
2276 ResetStats(); | 2277 ResetStats(); |
2277 } | 2278 } |
(...skipping 264 matching lines...)
2542 | 2543 |
2543 | 2544 |
2544 // ----------------------------------------------------------------------------- | 2545 // ----------------------------------------------------------------------------- |
2545 // OldSpace implementation | 2546 // OldSpace implementation |
2546 | 2547 |
2547 void PagedSpace::PrepareForMarkCompact() { | 2548 void PagedSpace::PrepareForMarkCompact() { |
2548 // We don't have a linear allocation area while sweeping. It will be restored | 2549 // We don't have a linear allocation area while sweeping. It will be restored |
2549 // on the first allocation after the sweep. | 2550 // on the first allocation after the sweep. |
2550 EmptyAllocationInfo(); | 2551 EmptyAllocationInfo(); |
2551 | 2552 |
2552 // This counter will be increased for pages which will be swept by the | |
2553 // sweeper threads. | |
2554 unswept_free_bytes_ = 0; | |
2555 | |
2556 // Clear the free list before a full GC---it will be rebuilt afterward. | 2553 // Clear the free list before a full GC---it will be rebuilt afterward. |
2557 free_list_.Reset(); | 2554 free_list_.Reset(); |
2558 } | 2555 } |
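
Retiring the linear allocation area, as EmptyAllocationInfo() does here and in MoveOverFreeMemory() above, means handing back the unused [top, limit) tail so the page stays iterable and then dropping top and limit. A small sketch under made-up types; free_block is a placeholder for writing a filler or returning the bytes to the free list, not a V8 API:

    #include <cstddef>
    #include <cstdint>

    struct ToyLab {
      uint8_t* top;
      uint8_t* limit;
    };

    void RetireLab(ToyLab* lab, void (*free_block)(uint8_t*, size_t)) {
      if (lab->top != nullptr && lab->top < lab->limit) {
        // Hand back the unused tail so the memory stays accounted/iterable.
        free_block(lab->top, static_cast<size_t>(lab->limit - lab->top));
      }
      // No linear allocation area while sweeping; it is restored on the
      // first allocation after the sweep.
      lab->top = nullptr;
      lab->limit = nullptr;
    }

    int main() {
      uint8_t buffer[64];
      ToyLab lab{buffer, buffer + sizeof(buffer)};
      RetireLab(&lab, [](uint8_t*, size_t) { /* e.g. record a free block */ });
      return lab.top == nullptr ? 0 : 1;
    }
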
2559 | 2556 |
2560 | 2557 |
2561 intptr_t PagedSpace::SizeOfObjects() { | 2558 intptr_t PagedSpace::SizeOfObjects() { |
2562 DCHECK(!FLAG_concurrent_sweeping || | 2559 const intptr_t size = Size() - (limit() - top()); |
2563 heap()->mark_compact_collector()->sweeping_in_progress() || | |
2564 (unswept_free_bytes_ == 0)); | |
2565 const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top()); | |
2566 DCHECK_GE(size, 0); | 2560 DCHECK_GE(size, 0); |
2567 USE(size); | 2561 USE(size); |
2568 return size; | 2562 return size; |
2569 } | 2563 } |
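
Size() already includes the current linear allocation area, so the not-yet-used tail between top() and limit() has to come off to get the bytes actually occupied by objects. A trivial worked example with hypothetical numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical figures, only to illustrate the formula above.
      int64_t accounted_size = 1 << 20;  // Size(): bytes charged to the space
      int64_t lab_unused = 4 << 10;      // limit() - top(): reserved, still empty
      int64_t size_of_objects = accounted_size - lab_unused;
      assert(size_of_objects == (1 << 20) - (4 << 10));
      return 0;
    }
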
2570 | 2564 |
2571 | 2565 |
2572 // After we have booted, we have created a map which represents free space | 2566 // After we have booted, we have created a map which represents free space |
2573 // on the heap. If there was already a free list then the elements on it | 2567 // on the heap. If there was already a free list then the elements on it |
2574 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2568 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
2575 // fix them. | 2569 // fix them. |
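
The repair the comment describes amounts to walking every node still linked into the free list and rewriting its map slot now that the real free-space map exists. A hedged sketch with made-up types (not V8's FreeSpace or Map classes):

    struct ToyFreeNode {
      const void* map;    // every heap object begins with a map pointer
      ToyFreeNode* next;  // free-list link stored in the node body
    };

    void RepairFreeList(ToyFreeNode* head, const void* free_space_map) {
      for (ToyFreeNode* node = head; node != nullptr; node = node->next) {
        node->map = free_space_map;  // replaces the placeholder (NULL) map
      }
    }
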
(...skipping 585 matching lines...)
3161 object->ShortPrint(); | 3155 object->ShortPrint(); |
3162 PrintF("\n"); | 3156 PrintF("\n"); |
3163 } | 3157 } |
3164 printf(" --------------------------------------\n"); | 3158 printf(" --------------------------------------\n"); |
3165 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3159 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3166 } | 3160 } |
3167 | 3161 |
3168 #endif // DEBUG | 3162 #endif // DEBUG |
3169 } // namespace internal | 3163 } // namespace internal |
3170 } // namespace v8 | 3164 } // namespace v8 |