Chromium Code Reviews
Unified Diff: src/heap/spaces.cc

Issue 1356533002: Reland "[heap] Introduce parallel compaction algorithm." (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Unbreak lock cycle on main thread (TSAN warning) Created 5 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slots-buffer.h"
(...skipping 62 matching lines...)

 // -----------------------------------------------------------------------------
 // CodeRange


 CodeRange::CodeRange(Isolate* isolate)
     : isolate_(isolate),
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0),
-      emergency_block_() {}
+      current_allocation_block_index_(0) {}


 bool CodeRange::SetUp(size_t requested) {
   DCHECK(code_range_ == NULL);

   if (requested == 0) {
     // When a target requires the code range feature, we put all code objects
     // in a kMaximalCodeRangeSize range of virtual address space, so that
     // they can call each other with near calls.
     if (kRequiresCodeRange) {
(...skipping 38 matching lines...)
     }
     base += kReservedCodeRangePages * base::OS::CommitPageSize();
   }
   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
   size_t size = code_range_->size() - (aligned_base - base) -
                 kReservedCodeRangePages * base::OS::CommitPageSize();
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;

   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  ReserveEmergencyBlock();
   return true;
 }


 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                        const FreeBlock* right) {
   // The entire point of CodeRange is that the difference between two
   // addresses in the range can be represented as a signed 32-bit int,
   // so the cast is semantically correct.
   return static_cast<int>(left->start - right->start);
(...skipping 115 matching lines...)
   return true;
 }


 void CodeRange::ReleaseBlock(const FreeBlock* block) {
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(*block);
 }


-void CodeRange::ReserveEmergencyBlock() {
-  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
-  if (emergency_block_.size == 0) {
-    ReserveBlock(requested_size, &emergency_block_);
-  } else {
-    DCHECK(emergency_block_.size >= requested_size);
-  }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
-  if (emergency_block_.size != 0) {
-    ReleaseBlock(&emergency_block_);
-    emergency_block_.size = 0;
-  }
-}
-
-
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
(...skipping 178 matching lines...)
   chunk->area_end_ = area_end;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
   chunk->slots_buffer_ = NULL;
   chunk->skip_list_ = NULL;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
   chunk->available_in_huge_free_list_ = 0;
   chunk->non_available_small_blocks_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
   chunk->SetFlag(WAS_SWEPT);
(...skipping 462 matching lines...)
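
The new chunk->parallel_compaction_state() field gives every MemoryChunk a flag that compaction tasks can manipulate atomically; only the kCompactingDone value is visible in this patch. Below is a minimal sketch of the claiming pattern such a flag enables, assuming a hypothetical kCompactingInProgress state and using std::atomic in place of V8's own atomic-value wrapper:

    #include <atomic>

    // Hypothetical chunk states; only kCompactingDone appears in the patch.
    enum CompactingState { kCompactingDone, kCompactingInProgress };

    struct ChunkSketch {
      std::atomic<CompactingState> compaction_state{kCompactingDone};

      // A compaction task claims the chunk only if no other task beat it to
      // it; compare-and-swap makes the claim race-free without a mutex.
      bool TryClaimForCompaction() {
        CompactingState expected = kCompactingDone;
        return compaction_state.compare_exchange_strong(expected,
                                                        kCompactingInProgress);
      }
    };

The compare-and-swap either installs kCompactingInProgress and returns true, or leaves the state untouched and returns false, so exactly one task wins each chunk.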
                 ObjectSpace::kObjectSpaceCodeSpace);
   STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
                 ObjectSpace::kObjectSpaceMapSpace);


 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
       unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL),
-      emergency_memory_(NULL) {
+      end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();

   allocation_info_.set_top(NULL);
   allocation_info_.set_limit(NULL);

   anchor_.InitializeAsAnchor(this);
 }


 bool PagedSpace::SetUp() { return true; }


 bool PagedSpace::HasBeenSetUp() { return true; }


 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
     heap()->isolate()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }

+void PagedSpace::MoveOverFreeMemory(PagedSpace* other, bool other_is_local) {

    Hannes Payer (out of office) 2015/09/17 09:09:46: As discussed offline: let's solve this using a thr
    Michael Lippautz 2015/09/17 09:27:15: Type-based lock it is.

+  DCHECK(identity() == other->identity());
+  // Destroy the linear allocation space of {other}. This is needed to
+  //   (a) not waste the memory and
+  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
+  other->EmptyAllocationInfo();
+
+  // Move over the free list. Concatenate makes sure that the source free list
+  // gets properly reset after moving over all nodes.
+  intptr_t freed_bytes =
+      free_list_.Concatenate(other->free_list(), other_is_local);
+  other->accounting_stats_.AllocateBytes(freed_bytes);
+  // We do not adjust accounting_stats_ for {this} as we treat the received
+  // memory as borrowed, i.e., the originating space keeps track of its
+  // capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly
+  // maintained by allocating and freeing blocks.
+}
+
+
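The review exchange above settles on what the reply calls a type-based lock: the other_is_local flag encodes that a CompactionSpace is, by construction, touched by only one thread, so its free-list mutex can safely be skipped. A sketch of the lock cycle this avoids (the cycle the patch-set title says TSAN warned about), with hypothetical names and std::mutex standing in for base::Mutex:

    #include <mutex>

    // If thread 1 runs a.Concatenate(&b) while thread 2 runs
    // b.Concatenate(&a), the two mutexes are taken in opposite orders and the
    // threads can deadlock. Skipping the source lock for a thread-local
    // source breaks that cycle.
    struct ListSketch {
      std::mutex mutex;

      void Concatenate(ListSketch* source, bool source_is_local) {
        std::lock_guard<std::mutex> target_guard(mutex);
        std::unique_lock<std::mutex> source_guard(source->mutex,
                                                  std::defer_lock);
        // A thread-local source has no concurrent readers or writers, so
        // locking it is unnecessary.
        if (!source_is_local) source_guard.lock();
        // ... splice source's nodes into this list, then reset source ...
      }
    };
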
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
   //   allocation_info_
-  //   emergency_memory_
   //   end_of_unswept_pages_
   //   unswept_free_bytes_
   //   anchor_

-  // It only makes sense to merge compatible spaces.
-  DCHECK(identity() == other->identity());
-
-  // Destroy the linear allocation space of {other}. This is needed to (a) not
-  // waste the memory and (b) keep the rest of the chunk in an iterable state
-  // (filler is needed).
-  int linear_size = static_cast<int>(other->limit() - other->top());
-  other->Free(other->top(), linear_size);
-
-  // Move over the free list.
-  free_list_.Concatenate(other->free_list());
+  // CompactionSpace is per definition a local space without concurrent access.
+  // Use this information to break the potential lock cycle in
+  // FreeList::Concatenate.
+  MoveOverFreeMemory(other, true);

   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
+  other->accounting_stats_.Reset();

   // Move over pages.
   PageIterator it(other);
   Page* p = nullptr;
   while (it.has_next()) {
     p = it.next();
     p->Unlink();
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
   }
(...skipping 63 matching lines...)
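
For context, a hedged sketch of the overall flow MergeCompactionSpace supports: each parallel compaction task evacuates into its own thread-local CompactionSpace, and after the tasks join, the main thread folds each one back into the owning space. The driver function below is illustrative only and is not part of this patch:

    #include <vector>

    // Hypothetical post-compaction merge loop. Each CompactionSpace was used
    // by exactly one task that has already finished, so no locking is needed
    // here beyond what MoveOverFreeMemory does internally.
    void MergeAfterParallelCompaction(
        PagedSpace* old_space, const std::vector<CompactionSpace*>& locals) {
      for (CompactionSpace* local : locals) {
        old_space->MergeCompactionSpace(local);
      }
    }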
   if (!CanExpand(size)) return false;

   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;

   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
-  DCHECK(heap()->CommittedOldGenerationMemory() <=
-         heap()->MaxOldGenerationSize() +
-             PagedSpace::MaxEmergencyMemoryAllocated());

   p->InsertAfter(anchor_.prev_page());

   return true;
 }


 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
(...skipping 49 matching lines...)
     page->Unlink();
   }

   heap()->QueueMemoryChunkForFree(page);

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }


-intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
-  // New space and large object space.
-  static const int spaces_without_emergency_memory = 2;
-  static const int spaces_with_emergency_memory =
-      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
-  return Page::kPageSize * spaces_with_emergency_memory;
-}
-
-
-void PagedSpace::CreateEmergencyMemory() {
-  if (identity() == CODE_SPACE) {
-    // Make the emergency block available to the allocator.
-    CodeRange* code_range = heap()->isolate()->code_range();
-    if (code_range != NULL && code_range->valid()) {
-      code_range->ReleaseEmergencyBlock();
-    }
-    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
-  }
-  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
-      AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
-  Page* page = static_cast<Page*>(emergency_memory_);
-  DCHECK(page->LiveBytes() == 0);
-  DCHECK(AreaSize() == page->area_size());
-  DCHECK(!free_list_.ContainsPageFreeListItems(page));
-  heap()->isolate()->memory_allocator()->Free(page);
-  emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
-  // Page::Initialize makes the chunk into a real page and adds it to the
-  // accounting for this space. Unlike PagedSpace::Expand, we don't check
-  // CanExpand first, so we can go over the limits a little here. That's OK,
-  // because we are in the process of compacting which will free up at least as
-  // much memory as it allocates.
-  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
-  page->InsertAfter(anchor_.prev_page());
-  emergency_memory_ = NULL;
-}
-
-
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif

 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
(...skipping 881 matching lines...)
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
   }
   return size;
 }


 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation

-intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category,
+                                       bool category_is_local) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
     // reverse order.
     base::LockGuard<base::Mutex> target_lock_guard(mutex());
-    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+    if (!category_is_local) category->mutex()->Lock();
     DCHECK(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
       end_ = category->end();
     } else {
       category->end()->set_next(top());
     }
     set_top(category->top());
     base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
+    if (!category_is_local) category->mutex()->Unlock();
   }
   return free_bytes;
 }
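
The patch replaces the second LockGuard with manual Lock()/Unlock() calls guarded by category_is_local. One design alternative (not what the patch does) is to keep RAII by making the guard itself conditional, which cannot miss the Unlock on an early return or exception; a sketch with std::mutex:

    #include <mutex>

    // Locks only when the protected data is actually shared between threads.
    class ConditionalLockGuard {
     public:
      ConditionalLockGuard(std::mutex* mutex, bool should_lock)
          : mutex_(mutex), locked_(should_lock) {
        if (locked_) mutex_->lock();
      }
      ~ConditionalLockGuard() {
        if (locked_) mutex_->unlock();
      }

     private:
      std::mutex* mutex_;
      bool locked_;
    };

Usage would mirror the patch, e.g. ConditionalLockGuard guard(category->mutex(), !category_is_local); though V8's base::Mutex has its own interface, so this is a sketch rather than a drop-in.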


 void FreeListCategory::Reset() {
   set_top(NULL);
   set_end(NULL);
   set_available(0);
 }
(...skipping 92 matching lines...)
     n = n->next();
   }
 }


 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
   Reset();
 }


-intptr_t FreeList::Concatenate(FreeList* free_list) {
+intptr_t FreeList::Concatenate(FreeList* free_list, bool free_list_is_local) {
   intptr_t free_bytes = 0;
-  free_bytes += small_list_.Concatenate(free_list->small_list());
-  free_bytes += medium_list_.Concatenate(free_list->medium_list());
-  free_bytes += large_list_.Concatenate(free_list->large_list());
-  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  free_bytes +=
+      small_list_.Concatenate(free_list->small_list(), free_list_is_local);
+  free_bytes +=
+      medium_list_.Concatenate(free_list->medium_list(), free_list_is_local);
+  free_bytes +=
+      large_list_.Concatenate(free_list->large_list(), free_list_is_local);
+  free_bytes +=
+      huge_list_.Concatenate(free_list->huge_list(), free_list_is_local);
   return free_bytes;
 }
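
The four size classes are concatenated with identical boilerplate; a table-driven loop is a compact equivalent. The sketch below is purely illustrative (the patch keeps the four explicit calls) and reuses the accessor names from the diff above:

    // Illustrative, behavior-preserving rewrite of FreeList::Concatenate.
    intptr_t FreeList::Concatenate(FreeList* free_list,
                                   bool free_list_is_local) {
      FreeListCategory* targets[] = {&small_list_, &medium_list_,
                                     &large_list_, &huge_list_};
      FreeListCategory* sources[] = {
          free_list->small_list(), free_list->medium_list(),
          free_list->large_list(), free_list->huge_list()};
      intptr_t free_bytes = 0;
      for (size_t i = 0; i < 4; ++i) {
        free_bytes += targets[i]->Concatenate(sources[i], free_list_is_local);
      }
      return free_bytes;
    }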


 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
 }
(...skipping 917 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8