Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1380723002: [heap] Remove unswept bytes counter (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add separate accounting for committed memory Created 5 years, 2 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slots-buffer.h"

(...skipping 906 matching lines...)
   return false;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
 }

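With the unswept-free-bytes counter gone, bytes that the mutator makes live on a not-yet-swept page are charged straight to the owning space's allocation statistics via Allocate(by), instead of shrinking a separate unswept counter that SizeOfObjects() would later subtract. A minimal, self-contained sketch of that bookkeeping (SpaceStats and the flag below are illustrative simplifications, not V8 types):

#include <cassert>
#include <cstdint>

// Hypothetical, simplified space accounting: one "allocated bytes" counter
// instead of "allocated" plus a separate unswept-free-bytes counter.
struct SpaceStats {
  int64_t size = 0;  // bytes currently counted as allocated in the space
  void Allocate(int64_t bytes) { size += bytes; }
};

int main() {
  SpaceStats stats;
  bool page_was_swept = false;  // the diff's branch: a not-yet-swept page
  int64_t by = 128;             // bytes the mutator just made live there

  if (!page_was_swept) {
    // Old scheme: unswept_free_bytes_ -= by;  new scheme: charge directly.
    stats.Allocate(by);
  }
  assert(stats.size == 128);
  return 0;
}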
 void MemoryChunk::ReleaseAllocatedMemory() {
   delete slots_buffer_;
   delete skip_list_;
   delete mutex_;
 }


 // -----------------------------------------------------------------------------
 // PagedSpace implementation

 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
               ObjectSpace::kObjectSpaceNewSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
               ObjectSpace::kObjectSpaceOldSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
               ObjectSpace::kObjectSpaceCodeSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);


 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
-      unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();

   allocation_info_.set_top(NULL);
   allocation_info_.set_limit(NULL);

   anchor_.InitializeAsAnchor(this);
 }

(...skipping 17 matching lines...)

 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
   // (a) not waste the memory and
   // (b) keep the rest of the chunk in an iterable state (filler is needed).
   other->EmptyAllocationInfo();

   // Move over the free list. Concatenate makes sure that the source free list
   // gets properly reset after moving over all nodes.
-  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+  intptr_t added = free_list_.Concatenate(other->free_list());

   // Moved memory is not recorded as allocated memory, but rather increases and
   // decreases capacity of the corresponding spaces. Used size and waste size
   // are maintained by the receiving space upon allocating and freeing blocks.
-  other->accounting_stats_.DecreaseCapacity(freed_bytes);
-  accounting_stats_.IncreaseCapacity(freed_bytes);
+  other->accounting_stats_.DecreaseCapacity(added);
+  accounting_stats_.IncreaseCapacity(added);
 }

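As the comment notes, moving a free list between spaces is recorded as a capacity transfer, not as allocation: the receiver can now hand out the moved bytes and the donor no longer can, while used and wasted sizes keep being maintained by the receiving space as blocks are actually allocated and freed. A standalone sketch of that idea, assuming a simplified Stats type (illustrative names, not V8's AllocationStats):

#include <cassert>
#include <cstdint>

// Hypothetical, simplified space statistics: capacity is what the space
// could hand out, size is what it has handed out.
struct Stats {
  int64_t capacity = 0;
  int64_t size = 0;

  void IncreaseCapacity(int64_t bytes) { capacity += bytes; }
  void DecreaseCapacity(int64_t bytes) {
    capacity -= bytes;
    assert(capacity >= size);
  }
};

int main() {
  Stats receiver;
  receiver.capacity = 1000;
  receiver.size = 400;

  Stats donor;           // e.g. a compaction space being merged back
  donor.capacity = 500;  // its free list holds 500 usable bytes

  // Moving the free list transfers capacity, not "allocated" bytes.
  int64_t added = 500;   // what FreeList::Concatenate would report
  donor.DecreaseCapacity(added);
  receiver.IncreaseCapacity(added);

  assert(receiver.capacity == 1500 && receiver.size == 400);
  assert(donor.capacity == 0 && donor.size == 0);
  return 0;
}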
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   // area_size_
   // allocation_info_
   // end_of_unswept_pages_
   // unswept_free_bytes_
   // anchor_

(...skipping 75 matching lines...)
   if (snapshotable() && !HasPages()) {
     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }

   if (!CanExpand(size)) return false;

   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;

+  accounting_stats_.CommitMemory(size);

    Hannes Payer (out of office), 2015/10/05 15:23:26:
        NewSpace still does not report the right committed amount …
    Michael Lippautz, 2015/10/07 13:15:39:
        Obsolete, as committed accounting is not present in …

+
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

   p->InsertAfter(anchor_.prev_page());

   return true;
 }

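The added accounting_stats_.CommitMemory(size) call is the patch set's "separate accounting for committed memory": the bytes actually committed for a page are tracked independently of the usable object area, since page headers and other overhead are committed to the OS but can never be handed out as objects. A rough, self-contained illustration of that distinction (simplified types and made-up constants, not V8's):

#include <cassert>
#include <cstdint>

// Hypothetical split between "committed" memory (backed by the OS) and
// "capacity" (the usable object area, i.e. committed minus page overhead).
struct Stats {
  int64_t committed = 0;
  int64_t capacity = 0;

  void CommitMemory(int64_t page_size) { committed += page_size; }
  void ExpandSpace(int64_t area_size) { capacity += area_size; }
};

int main() {
  const int64_t kPageSize = 1 << 20;   // 1 MB page, illustrative only
  const int64_t kPageOverhead = 4096;  // headers/bookkeeping, illustrative

  Stats stats;
  stats.CommitMemory(kPageSize);                 // what Expand() now records
  stats.ExpandSpace(kPageSize - kPageOverhead);  // usable area grows by less

  // The overhead is committed to the OS but never allocatable for objects.
  assert(stats.committed - stats.capacity == kPageOverhead);
  return 0;
}

Per the reviewer thread above, this committed-memory accounting was later split out of this CL and landed separately.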
(...skipping 25 matching lines...)

 void PagedSpace::ReleasePage(Page* page) {
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());

   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
   } else {
-    DecreaseUnsweptFreeBytes(page);
+    accounting_stats_.DeallocateBytes(page->area_size());
   }

   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
     heap()->decrement_scan_on_scavenge_pages();
     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
   }

   DCHECK(!free_list_.ContainsPageFreeListItems(page));

   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
     allocation_info_.set_top(NULL);
     allocation_info_.set_limit(NULL);
   }

   // If page is still in a list, unlink it from that list.
   if (page->next_chunk() != NULL) {
     DCHECK(page->prev_chunk() != NULL);
     page->Unlink();
   }

   heap()->QueueMemoryChunkForFree(page);
+  accounting_stats_.CommitMemory(page->size());

    Hannes Payer (out of office), 2015/10/05 15:23:26:
        Uncommit?
    Michael Lippautz, 2015/10/07 13:15:39:
        Done. Committed accounting landed in https://co…

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }

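Before an already-swept empty page can be handed back, its entries have to be pulled out of the space's free list (EvictFreeListItems above); otherwise a later allocation could be satisfied from memory that is about to be released, and the evicted bytes could not be folded back into the accounting. A toy sketch of such an eviction (hypothetical structures, not V8's category-based FreeList):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy free list: each node remembers which page its memory lives on.
struct Node {
  int page_id;
  int64_t size_in_bytes;
};

struct ToyFreeList {
  std::vector<Node> nodes;

  // Drop every node that lives on `page_id` and return the bytes removed,
  // so the releasing space can fix up its statistics before freeing the page.
  int64_t EvictFreeListItems(int page_id) {
    int64_t evicted = 0;
    std::vector<Node> kept;
    for (const Node& n : nodes) {
      if (n.page_id == page_id) {
        evicted += n.size_in_bytes;
      } else {
        kept.push_back(n);
      }
    }
    nodes.swap(kept);
    return evicted;
  }
};

int main() {
  ToyFreeList list;
  list.nodes = {{1, 256}, {2, 64}, {1, 512}};

  int64_t evicted = list.EvictFreeListItems(1);  // page 1 is being released
  assert(evicted == 768);
  assert(list.nodes.size() == 1 && list.nodes[0].page_id == 2);
  return 0;
}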
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif

(...skipping 1064 matching lines...)

       wasted_bytes_(0),
       small_list_(this, kSmall),
       medium_list_(this, kMedium),
       large_list_(this, kLarge),
       huge_list_(this, kHuge) {
   Reset();
 }

 intptr_t FreeList::Concatenate(FreeList* other) {
-  intptr_t free_bytes = 0;
+  intptr_t usable_bytes = 0;
+  intptr_t wasted_bytes = 0;

   // This is safe (not going to deadlock) since Concatenate operations
   // are never performed on the same free lists at the same time in
   // reverse order. Furthermore, we only lock if the PagedSpace containing
   // the free list is know to be globally available, i.e., not local.
   if (!owner()->is_local()) mutex_.Lock();
   if (!other->owner()->is_local()) other->mutex()->Lock();

-  wasted_bytes_ += other->wasted_bytes_;
+  wasted_bytes = other->wasted_bytes_;
+  wasted_bytes_ += wasted_bytes;
   other->wasted_bytes_ = 0;

-  free_bytes += small_list_.Concatenate(other->small_list());
-  free_bytes += medium_list_.Concatenate(other->medium_list());
-  free_bytes += large_list_.Concatenate(other->large_list());
-  free_bytes += huge_list_.Concatenate(other->huge_list());
+  usable_bytes += small_list_.Concatenate(other->small_list());
+  usable_bytes += medium_list_.Concatenate(other->medium_list());
+  usable_bytes += large_list_.Concatenate(other->large_list());
+  usable_bytes += huge_list_.Concatenate(other->huge_list());

   if (!other->owner()->is_local()) other->mutex()->Unlock();
   if (!owner()->is_local()) mutex_.Unlock();
-  return free_bytes;
+  return usable_bytes + wasted_bytes;
 }

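The renamed return value makes the contract explicit: Concatenate drains the donor completely and reports usable plus wasted bytes, so a caller such as MoveOverFreeMemory above can transfer the full amount of capacity between spaces. A simplified, standalone model of that behaviour (toy types, not V8's category-based FreeList):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical, simplified free list: nodes are just byte counts, plus a
// "wasted bytes" counter for blocks too small to be linked in.
struct ToyFreeList {
  std::vector<int64_t> nodes;
  int64_t wasted_bytes = 0;

  // Mirrors the diff's idea: the donor is drained completely and the return
  // value covers usable *and* wasted bytes.
  int64_t Concatenate(ToyFreeList* other) {
    int64_t usable_bytes = 0;
    int64_t wasted = other->wasted_bytes;
    wasted_bytes += wasted;
    other->wasted_bytes = 0;
    for (int64_t n : other->nodes) usable_bytes += n;
    nodes.insert(nodes.end(), other->nodes.begin(), other->nodes.end());
    other->nodes.clear();
    return usable_bytes + wasted;
  }
};

int main() {
  ToyFreeList receiver, donor;
  donor.nodes = {64, 256};
  donor.wasted_bytes = 8;

  int64_t added = receiver.Concatenate(&donor);
  assert(added == 64 + 256 + 8);
  assert(donor.nodes.empty() && donor.wasted_bytes == 0);
  return 0;
}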
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
   ResetStats();
 }

(...skipping 264 matching lines...)


 // -----------------------------------------------------------------------------
 // OldSpace implementation

 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
   EmptyAllocationInfo();

-  // This counter will be increased for pages which will be swept by the
-  // sweeper threads.
-  unswept_free_bytes_ = 0;
-
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }

 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(!FLAG_concurrent_sweeping ||
-         heap()->mark_compact_collector()->sweeping_in_progress() ||
-         (unswept_free_bytes_ == 0));
-  const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
+  const intptr_t size = Size() - (limit() - top());
   DCHECK_GE(size, 0);
   USE(size);
   return size;
 }

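With the counter removed, the object size of a paged space is simply its accounted size minus the unused tail of the current linear allocation area, [top, limit). A tiny worked example with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative numbers only: Size() is the space's accounted allocated
  // bytes, which still include the not-yet-used tail of the linear
  // allocation area [top, limit).
  int64_t accounted_size = 4096;  // what Size() would report
  int64_t top = 3000;
  int64_t limit = 3500;           // 500 bytes reserved but not yet handed out

  int64_t size_of_objects = accounted_size - (limit - top);
  assert(size_of_objects == 3596);
  return 0;
}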
 // After we have booted, we have created a map which represents free space
 // on the heap. If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.

(...skipping 585 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8