OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
10 #include "src/heap/slots-buffer.h" | 10 #include "src/heap/slots-buffer.h" |
(...skipping 62 matching lines...) |
73 | 73 |
74 // ----------------------------------------------------------------------------- | 74 // ----------------------------------------------------------------------------- |
75 // CodeRange | 75 // CodeRange |
76 | 76 |
77 | 77 |
78 CodeRange::CodeRange(Isolate* isolate) | 78 CodeRange::CodeRange(Isolate* isolate) |
79 : isolate_(isolate), | 79 : isolate_(isolate), |
80 code_range_(NULL), | 80 code_range_(NULL), |
81 free_list_(0), | 81 free_list_(0), |
82 allocation_list_(0), | 82 allocation_list_(0), |
83 current_allocation_block_index_(0), | 83 current_allocation_block_index_(0) {} |
84 emergency_block_() {} | |
85 | 84 |
86 | 85 |
87 bool CodeRange::SetUp(size_t requested) { | 86 bool CodeRange::SetUp(size_t requested) { |
88 DCHECK(code_range_ == NULL); | 87 DCHECK(code_range_ == NULL); |
89 | 88 |
90 if (requested == 0) { | 89 if (requested == 0) { |
91 // When a target requires the code range feature, we put all code objects | 90 // When a target requires the code range feature, we put all code objects |
92 // in a kMaximalCodeRangeSize range of virtual address space, so that | 91 // in a kMaximalCodeRangeSize range of virtual address space, so that |
93 // they can call each other with near calls. | 92 // they can call each other with near calls. |
94 if (kRequiresCodeRange) { | 93 if (kRequiresCodeRange) { |
(...skipping 38 matching lines...) |
133 } | 132 } |
134 base += kReservedCodeRangePages * base::OS::CommitPageSize(); | 133 base += kReservedCodeRangePages * base::OS::CommitPageSize(); |
135 } | 134 } |
136 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); | 135 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); |
137 size_t size = code_range_->size() - (aligned_base - base) - | 136 size_t size = code_range_->size() - (aligned_base - base) - |
138 kReservedCodeRangePages * base::OS::CommitPageSize(); | 137 kReservedCodeRangePages * base::OS::CommitPageSize(); |
139 allocation_list_.Add(FreeBlock(aligned_base, size)); | 138 allocation_list_.Add(FreeBlock(aligned_base, size)); |
140 current_allocation_block_index_ = 0; | 139 current_allocation_block_index_ = 0; |
141 | 140 |
142 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); | 141 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
143 ReserveEmergencyBlock(); | |
144 return true; | 142 return true; |
145 } | 143 } |
146 | 144 |
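For orientation, a compact standalone sketch of the usable-area arithmetic in CodeRange::SetUp above (values illustrative; the real alignment is MemoryChunk::kAlignment and the page size comes from base::OS::CommitPageSize()):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kAlignment = uintptr_t{1} << 20;  // assume 1 MB chunk alignment
      const uintptr_t kPageSize = 4096;                 // assume 4 KB commit pages
      const uintptr_t kReservedPages = 1;               // stands in for kReservedCodeRangePages
      const uintptr_t reservation = 256u << 20;         // full reserved range (256 MB)
      uintptr_t base = 0x12345000;                      // start of the reservation

      base += kReservedPages * kPageSize;
      // RoundUp(base, kAlignment) for power-of-two alignment:
      uintptr_t aligned_base = (base + kAlignment - 1) & ~(kAlignment - 1);
      // Usable size: full reservation minus alignment slack minus reserved pages.
      uintptr_t size = reservation - (aligned_base - base) - kReservedPages * kPageSize;
      printf("aligned_base=%p usable=%zu\n", (void*)aligned_base, (size_t)size);
    }
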
147 | 145 |
148 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 146 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
149 const FreeBlock* right) { | 147 const FreeBlock* right) { |
150 // The entire point of CodeRange is that the difference between two | 148 // The entire point of CodeRange is that the difference between two |
151 // addresses in the range can be represented as a signed 32-bit int, | 149 // addresses in the range can be represented as a signed 32-bit int, |
152 // so the cast is semantically correct. | 150 // so the cast is semantically correct. |
153 return static_cast<int>(left->start - right->start); | 151 return static_cast<int>(left->start - right->start); |
(...skipping 115 matching lines...) |
269 return true; | 267 return true; |
270 } | 268 } |
271 | 269 |
272 | 270 |
273 void CodeRange::ReleaseBlock(const FreeBlock* block) { | 271 void CodeRange::ReleaseBlock(const FreeBlock* block) { |
274 base::LockGuard<base::Mutex> guard(&code_range_mutex_); | 272 base::LockGuard<base::Mutex> guard(&code_range_mutex_); |
275 free_list_.Add(*block); | 273 free_list_.Add(*block); |
276 } | 274 } |
277 | 275 |
278 | 276 |
279 void CodeRange::ReserveEmergencyBlock() { | |
280 const size_t requested_size = MemoryAllocator::CodePageAreaSize(); | |
281 if (emergency_block_.size == 0) { | |
282 ReserveBlock(requested_size, &emergency_block_); | |
283 } else { | |
284 DCHECK(emergency_block_.size >= requested_size); | |
285 } | |
286 } | |
287 | |
288 | |
289 void CodeRange::ReleaseEmergencyBlock() { | |
290 if (emergency_block_.size != 0) { | |
291 ReleaseBlock(&emergency_block_); | |
292 emergency_block_.size = 0; | |
293 } | |
294 } | |
295 | |
296 | |
297 // ----------------------------------------------------------------------------- | 277 // ----------------------------------------------------------------------------- |
298 // MemoryAllocator | 278 // MemoryAllocator |
299 // | 279 // |
300 | 280 |
301 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 281 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
302 : isolate_(isolate), | 282 : isolate_(isolate), |
303 capacity_(0), | 283 capacity_(0), |
304 capacity_executable_(0), | 284 capacity_executable_(0), |
305 size_(0), | 285 size_(0), |
306 size_executable_(0), | 286 size_executable_(0), |
(...skipping 178 matching lines...) |
485 chunk->area_end_ = area_end; | 465 chunk->area_end_ = area_end; |
486 chunk->flags_ = 0; | 466 chunk->flags_ = 0; |
487 chunk->set_owner(owner); | 467 chunk->set_owner(owner); |
488 chunk->InitializeReservedMemory(); | 468 chunk->InitializeReservedMemory(); |
489 chunk->slots_buffer_ = NULL; | 469 chunk->slots_buffer_ = NULL; |
490 chunk->skip_list_ = NULL; | 470 chunk->skip_list_ = NULL; |
491 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 471 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
492 chunk->progress_bar_ = 0; | 472 chunk->progress_bar_ = 0; |
493 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); | 473 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); |
494 chunk->set_parallel_sweeping(SWEEPING_DONE); | 474 chunk->set_parallel_sweeping(SWEEPING_DONE); |
| 475 chunk->parallel_compaction_state().SetValue(kCompactingDone); |
495 chunk->mutex_ = NULL; | 476 chunk->mutex_ = NULL; |
496 chunk->available_in_small_free_list_ = 0; | 477 chunk->available_in_small_free_list_ = 0; |
497 chunk->available_in_medium_free_list_ = 0; | 478 chunk->available_in_medium_free_list_ = 0; |
498 chunk->available_in_large_free_list_ = 0; | 479 chunk->available_in_large_free_list_ = 0; |
499 chunk->available_in_huge_free_list_ = 0; | 480 chunk->available_in_huge_free_list_ = 0; |
500 chunk->non_available_small_blocks_ = 0; | 481 chunk->non_available_small_blocks_ = 0; |
501 chunk->ResetLiveBytes(); | 482 chunk->ResetLiveBytes(); |
502 Bitmap::Clear(chunk); | 483 Bitmap::Clear(chunk); |
503 chunk->initialize_scan_on_scavenge(false); | 484 chunk->initialize_scan_on_scavenge(false); |
504 chunk->SetFlag(WAS_SWEPT); | 485 chunk->SetFlag(WAS_SWEPT); |
(...skipping 462 matching lines...) |
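The new parallel_compaction_state() field initialized above (to kCompactingDone) is what lets compaction tasks coordinate on chunks. A minimal sketch of how a task might claim a chunk, assuming AtomicValue exposes a compare-and-swap-style TrySetValue and a hypothetical kCompactingInProgress state (only kCompactingDone appears in this hunk):

    // Hypothetical: atomically claim a chunk so exactly one compaction task
    // evacuates it. TrySetValue(old, new) returns true iff the CAS succeeded.
    bool TryClaimChunk(MemoryChunk* chunk) {
      return chunk->parallel_compaction_state().TrySetValue(
          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress);
    }
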
967 ObjectSpace::kObjectSpaceCodeSpace); | 948 ObjectSpace::kObjectSpaceCodeSpace); |
968 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == | 949 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) == |
969 ObjectSpace::kObjectSpaceMapSpace); | 950 ObjectSpace::kObjectSpaceMapSpace); |
970 | 951 |
971 | 952 |
972 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, | 953 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space, |
973 Executability executable) | 954 Executability executable) |
974 : Space(heap, space, executable), | 955 : Space(heap, space, executable), |
975 free_list_(this), | 956 free_list_(this), |
976 unswept_free_bytes_(0), | 957 unswept_free_bytes_(0), |
977 end_of_unswept_pages_(NULL), | 958 end_of_unswept_pages_(NULL) { |
978 emergency_memory_(NULL) { | |
979 area_size_ = MemoryAllocator::PageAreaSize(space); | 959 area_size_ = MemoryAllocator::PageAreaSize(space); |
980 accounting_stats_.Clear(); | 960 accounting_stats_.Clear(); |
981 | 961 |
982 allocation_info_.set_top(NULL); | 962 allocation_info_.set_top(NULL); |
983 allocation_info_.set_limit(NULL); | 963 allocation_info_.set_limit(NULL); |
984 | 964 |
985 anchor_.InitializeAsAnchor(this); | 965 anchor_.InitializeAsAnchor(this); |
986 } | 966 } |
987 | 967 |
988 | 968 |
989 bool PagedSpace::SetUp() { return true; } | 969 bool PagedSpace::SetUp() { return true; } |
990 | 970 |
991 | 971 |
992 bool PagedSpace::HasBeenSetUp() { return true; } | 972 bool PagedSpace::HasBeenSetUp() { return true; } |
993 | 973 |
994 | 974 |
995 void PagedSpace::TearDown() { | 975 void PagedSpace::TearDown() { |
996 PageIterator iterator(this); | 976 PageIterator iterator(this); |
997 while (iterator.has_next()) { | 977 while (iterator.has_next()) { |
998 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 978 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
999 } | 979 } |
1000 anchor_.set_next_page(&anchor_); | 980 anchor_.set_next_page(&anchor_); |
1001 anchor_.set_prev_page(&anchor_); | 981 anchor_.set_prev_page(&anchor_); |
1002 accounting_stats_.Clear(); | 982 accounting_stats_.Clear(); |
1003 } | 983 } |
1004 | 984 |
1005 | 985 |
| 986 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) { |
| 987 DCHECK(identity() == other->identity()); |
| 988 // Destroy the linear allocation space of {other}. This is needed to |
| 989 // (a) not waste the memory and |
| 990 // (b) keep the rest of the chunk in an iterable state (filler is needed). |
| 991 other->EmptyAllocationInfo(); |
| 992 |
| 993 // Move over the free list. Concatenate makes sure that the source free list |
| 994 // gets properly reset after moving over all nodes. |
| 995 intptr_t freed_bytes = free_list_.Concatenate(other->free_list()); |
| 996 |
| 997 // Moved memory is not recorded as allocated memory, but rather increases and |
| 998 // decreases capacity of the corresponding spaces. Used size and waste size |
| 999 // are maintained by the receiving space upon allocating and freeing blocks. |
| 1000 other->accounting_stats_.DecreaseCapacity(freed_bytes); |
| 1001 accounting_stats_.IncreaseCapacity(freed_bytes); |
| 1002 } |
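MoveOverFreeMemory is the piece that lets a thread-local compaction space hand its leftover free-list memory back to the global space as capacity rather than as allocation. A hedged sketch of the intended call pattern, using MergeCompactionSpace below (the CompactionSpace constructor signature is assumed from PagedSpace's):

    // Sketch: evacuate into a local compaction space, then fold it back.
    void EvacuateThenMerge(Heap* heap, PagedSpace* old_space) {
      CompactionSpace compaction_space(heap, OLD_SPACE, NOT_EXECUTABLE);
      // ... compaction tasks allocate into compaction_space here ...
      old_space->MergeCompactionSpace(&compaction_space);  // pages + free list
    }
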
| 1003 |
| 1004 |
1006 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { | 1005 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { |
1007 // Unmerged fields: | 1006 // Unmerged fields: |
1008 // area_size_ | 1007 // area_size_ |
1009 // allocation_info_ | 1008 // allocation_info_ |
1010 // emergency_memory_ | |
1011 // end_of_unswept_pages_ | 1009 // end_of_unswept_pages_ |
1012 // unswept_free_bytes_ | 1010 // unswept_free_bytes_ |
1013 // anchor_ | 1011 // anchor_ |
1014 | 1012 |
1015 // It only makes sense to merge compatible spaces. | 1013 MoveOverFreeMemory(other); |
1016 DCHECK(identity() == other->identity()); | |
1017 | |
1018 // Destroy the linear allocation space of {other}. This is needed to (a) not | |
1019 // waste the memory and (b) keep the rest of the chunk in an iterable state | |
1020 // (filler is needed). | |
1021 int linear_size = static_cast<int>(other->limit() - other->top()); | |
1022 other->Free(other->top(), linear_size); | |
1023 | |
1024 // Move over the free list. | |
1025 free_list_.Concatenate(other->free_list()); | |
1026 | 1014 |
1027 // Update and clear accounting statistics. | 1015 // Update and clear accounting statistics. |
1028 accounting_stats_.Merge(other->accounting_stats_); | 1016 accounting_stats_.Merge(other->accounting_stats_); |
1029 other->accounting_stats_.Clear(); | 1017 other->accounting_stats_.Reset(); |
1030 | 1018 |
1031 // Move over pages. | 1019 // Move over pages. |
1032 PageIterator it(other); | 1020 PageIterator it(other); |
1033 Page* p = nullptr; | 1021 Page* p = nullptr; |
1034 while (it.has_next()) { | 1022 while (it.has_next()) { |
1035 p = it.next(); | 1023 p = it.next(); |
1036 p->Unlink(); | 1024 p->Unlink(); |
1037 p->set_owner(this); | 1025 p->set_owner(this); |
1038 p->InsertAfter(anchor_.prev_page()); | 1026 p->InsertAfter(anchor_.prev_page()); |
1039 } | 1027 } |
(...skipping 63 matching lines...) |
1103 if (!CanExpand(size)) return false; | 1091 if (!CanExpand(size)) return false; |
1104 | 1092 |
1105 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, | 1093 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, |
1106 executable()); | 1094 executable()); |
1107 if (p == NULL) return false; | 1095 if (p == NULL) return false; |
1108 | 1096 |
1109 // Pages created during bootstrapping may contain immortal immovable objects. | 1097 // Pages created during bootstrapping may contain immortal immovable objects. |
1110 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1098 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1111 | 1099 |
1112 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1100 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1113 DCHECK(heap()->CommittedOldGenerationMemory() <= | |
1114 heap()->MaxOldGenerationSize() + | |
1115 PagedSpace::MaxEmergencyMemoryAllocated()); | |
1116 | 1101 |
1117 p->InsertAfter(anchor_.prev_page()); | 1102 p->InsertAfter(anchor_.prev_page()); |
1118 | 1103 |
1119 return true; | 1104 return true; |
1120 } | 1105 } |
1121 | 1106 |
1122 | 1107 |
1123 int PagedSpace::CountTotalPages() { | 1108 int PagedSpace::CountTotalPages() { |
1124 PageIterator it(this); | 1109 PageIterator it(this); |
1125 int count = 0; | 1110 int count = 0; |
(...skipping 49 matching lines...) |
1175 page->Unlink(); | 1160 page->Unlink(); |
1176 } | 1161 } |
1177 | 1162 |
1178 heap()->QueueMemoryChunkForFree(page); | 1163 heap()->QueueMemoryChunkForFree(page); |
1179 | 1164 |
1180 DCHECK(Capacity() > 0); | 1165 DCHECK(Capacity() > 0); |
1181 accounting_stats_.ShrinkSpace(AreaSize()); | 1166 accounting_stats_.ShrinkSpace(AreaSize()); |
1182 } | 1167 } |
1183 | 1168 |
1184 | 1169 |
1185 intptr_t PagedSpace::MaxEmergencyMemoryAllocated() { | |
1186 // New space and large object space. | |
1187 static const int spaces_without_emergency_memory = 2; | |
1188 static const int spaces_with_emergency_memory = | |
1189 LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory; | |
1190 return Page::kPageSize * spaces_with_emergency_memory; | |
1191 } | |
1192 | |
1193 | |
1194 void PagedSpace::CreateEmergencyMemory() { | |
1195 if (identity() == CODE_SPACE) { | |
1196 // Make the emergency block available to the allocator. | |
1197 CodeRange* code_range = heap()->isolate()->code_range(); | |
1198 if (code_range != NULL && code_range->valid()) { | |
1199 code_range->ReleaseEmergencyBlock(); | |
1200 } | |
1201 DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize()); | |
1202 } | |
1203 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( | |
1204 AreaSize(), AreaSize(), executable(), this); | |
1205 } | |
1206 | |
1207 | |
1208 void PagedSpace::FreeEmergencyMemory() { | |
1209 Page* page = static_cast<Page*>(emergency_memory_); | |
1210 DCHECK(page->LiveBytes() == 0); | |
1211 DCHECK(AreaSize() == page->area_size()); | |
1212 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | |
1213 heap()->isolate()->memory_allocator()->Free(page); | |
1214 emergency_memory_ = NULL; | |
1215 } | |
1216 | |
1217 | |
1218 void PagedSpace::UseEmergencyMemory() { | |
1219 // Page::Initialize makes the chunk into a real page and adds it to the | |
1220 // accounting for this space. Unlike PagedSpace::Expand, we don't check | |
1221 // CanExpand first, so we can go over the limits a little here. That's OK, | |
1222 // because we are in the process of compacting which will free up at least as | |
1223 // much memory as it allocates. | |
1224 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); | |
1225 page->InsertAfter(anchor_.prev_page()); | |
1226 emergency_memory_ = NULL; | |
1227 } | |
1228 | |
1229 | |
1230 #ifdef DEBUG | 1170 #ifdef DEBUG |
1231 void PagedSpace::Print() {} | 1171 void PagedSpace::Print() {} |
1232 #endif | 1172 #endif |
1233 | 1173 |
1234 #ifdef VERIFY_HEAP | 1174 #ifdef VERIFY_HEAP |
1235 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1175 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1236 bool allocation_pointer_found_in_space = | 1176 bool allocation_pointer_found_in_space = |
1237 (allocation_info_.top() == allocation_info_.limit()); | 1177 (allocation_info_.top() == allocation_info_.limit()); |
1238 PageIterator page_iterator(this); | 1178 PageIterator page_iterator(this); |
1239 while (page_iterator.has_next()) { | 1179 while (page_iterator.has_next()) { |
(...skipping 886 matching lines...) |
2126 | 2066 |
2127 | 2067 |
2128 // ----------------------------------------------------------------------------- | 2068 // ----------------------------------------------------------------------------- |
2129 // Free lists for old object spaces implementation | 2069 // Free lists for old object spaces implementation |
2130 | 2070 |
2131 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { | 2071 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
2132 intptr_t free_bytes = 0; | 2072 intptr_t free_bytes = 0; |
2133 if (category->top() != NULL) { | 2073 if (category->top() != NULL) { |
2134 // This is safe (not going to deadlock) since Concatenate operations | 2074 // This is safe (not going to deadlock) since Concatenate operations |
2135 // are never performed on the same free lists at the same time in | 2075 // are never performed on the same free lists at the same time in |
2136 // reverse order. | 2076 // reverse order. Furthermore, we only lock if the PagedSpace containing |
2137 base::LockGuard<base::Mutex> target_lock_guard(mutex()); | 2077 // the free list is known to be globally available, i.e., not local. |
2138 base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); | 2078 if (!this->owner()->owner()->is_local()) mutex()->Lock(); |
| 2079 if (!category->owner()->owner()->is_local()) category->mutex()->Lock(); |
2139 DCHECK(category->end_ != NULL); | 2080 DCHECK(category->end_ != NULL); |
2140 free_bytes = category->available(); | 2081 free_bytes = category->available(); |
2141 if (end_ == NULL) { | 2082 if (end_ == NULL) { |
2142 end_ = category->end(); | 2083 end_ = category->end(); |
2143 } else { | 2084 } else { |
2144 category->end()->set_next(top()); | 2085 category->end()->set_next(top()); |
2145 } | 2086 } |
2146 set_top(category->top()); | 2087 set_top(category->top()); |
2147 base::NoBarrier_Store(&top_, category->top_); | 2088 base::NoBarrier_Store(&top_, category->top_); |
2148 available_ += category->available(); | 2089 available_ += category->available(); |
2149 category->Reset(); | 2090 category->Reset(); |
| 2091 if (!category->owner()->owner()->is_local()) category->mutex()->Unlock(); |
| 2092 if (!this->owner()->owner()->is_local()) mutex()->Unlock(); |
2150 } | 2093 } |
2151 return free_bytes; | 2094 return free_bytes; |
2152 } | 2095 } |
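The manual Lock()/Unlock() pairs above engage only when the free list belongs to a globally shared space; thread-local compaction spaces skip the mutex entirely. The same idea expressed as a RAII guard, for illustration (ConditionalLockGuard is a hypothetical helper, not part of this change):

    // Hypothetical guard: takes the mutex only when should_lock is true, and
    // releases it on every exit path, including early returns.
    class ConditionalLockGuard {
     public:
      ConditionalLockGuard(base::Mutex* mutex, bool should_lock)
          : mutex_(mutex), locked_(should_lock) {
        if (locked_) mutex_->Lock();
      }
      ~ConditionalLockGuard() {
        if (locked_) mutex_->Unlock();
      }

     private:
      base::Mutex* mutex_;
      bool locked_;
    };

With such a guard, the two conditional Lock/Unlock pairs in Concatenate collapse to two stack objects whose destructors release the mutexes on every exit path.
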
2153 | 2096 |
2154 | 2097 |
2155 void FreeListCategory::Reset() { | 2098 void FreeListCategory::Reset() { |
2156 set_top(NULL); | 2099 set_top(NULL); |
2157 set_end(NULL); | 2100 set_end(NULL); |
2158 set_available(0); | 2101 set_available(0); |
2159 } | 2102 } |
(...skipping 87 matching lines...) |
2247 if (*map_location == NULL) { | 2190 if (*map_location == NULL) { |
2248 *map_location = heap->free_space_map(); | 2191 *map_location = heap->free_space_map(); |
2249 } else { | 2192 } else { |
2250 DCHECK(*map_location == heap->free_space_map()); | 2193 DCHECK(*map_location == heap->free_space_map()); |
2251 } | 2194 } |
2252 n = n->next(); | 2195 n = n->next(); |
2253 } | 2196 } |
2254 } | 2197 } |
2255 | 2198 |
2256 | 2199 |
2257 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) { | 2200 FreeList::FreeList(PagedSpace* owner) |
| 2201 : owner_(owner), |
| 2202 heap_(owner->heap()), |
| 2203 small_list_(this), |
| 2204 medium_list_(this), |
| 2205 large_list_(this), |
| 2206 huge_list_(this) { |
2258 Reset(); | 2207 Reset(); |
2259 } | 2208 } |
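Passing this into each FreeListCategory above establishes the back-pointer chain that Concatenate relies on: category -> free list -> owning space. A sketch of that chain, with accessor names taken from their uses earlier in this diff (the category-side storage of the pointer is assumed):

    // Assumed accessor chain: FreeListCategory::owner() returns the FreeList,
    // FreeList::owner() returns the PagedSpace, and is_local() marks
    // thread-local compaction spaces that need no locking.
    bool NeedsLocking(FreeListCategory* category) {
      return !category->owner()->owner()->is_local();
    }
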
2260 | 2209 |
2261 | 2210 |
2262 intptr_t FreeList::Concatenate(FreeList* free_list) { | 2211 intptr_t FreeList::Concatenate(FreeList* free_list) { |
2263 intptr_t free_bytes = 0; | 2212 intptr_t free_bytes = 0; |
2264 free_bytes += small_list_.Concatenate(free_list->small_list()); | 2213 free_bytes += small_list_.Concatenate(free_list->small_list()); |
2265 free_bytes += medium_list_.Concatenate(free_list->medium_list()); | 2214 free_bytes += medium_list_.Concatenate(free_list->medium_list()); |
2266 free_bytes += large_list_.Concatenate(free_list->large_list()); | 2215 free_bytes += large_list_.Concatenate(free_list->large_list()); |
2267 free_bytes += huge_list_.Concatenate(free_list->huge_list()); | 2216 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
(...skipping 927 matching lines...) |
3195 object->ShortPrint(); | 3144 object->ShortPrint(); |
3196 PrintF("\n"); | 3145 PrintF("\n"); |
3197 } | 3146 } |
3198 printf(" --------------------------------------\n"); | 3147 printf(" --------------------------------------\n"); |
3199 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3148 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3200 } | 3149 } |
3201 | 3150 |
3202 #endif // DEBUG | 3151 #endif // DEBUG |
3203 } // namespace internal | 3152 } // namespace internal |
3204 } // namespace v8 | 3153 } // namespace v8 |