Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
| 10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
| (...skipping 2182 matching lines...) | |
| 2193 free_bytes += huge_list_.Concatenate(free_list->huge_list()); | 2193 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
| 2194 return free_bytes; | 2194 return free_bytes; |
| 2195 } | 2195 } |
| 2196 | 2196 |
| 2197 | 2197 |
| 2198 void FreeList::Reset() { | 2198 void FreeList::Reset() { |
| 2199 small_list_.Reset(); | 2199 small_list_.Reset(); |
| 2200 medium_list_.Reset(); | 2200 medium_list_.Reset(); |
| 2201 large_list_.Reset(); | 2201 large_list_.Reset(); |
| 2202 huge_list_.Reset(); | 2202 huge_list_.Reset(); |
| | 2203 unreported_allocation_ = 0; |
| 2203 } | 2204 } |
| 2204 | 2205 |
| 2205 | 2206 |
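The lines above show the structure this patch touches: `FreeList` keeps separate small/medium/large/huge category lists, and the patch adds an `unreported_allocation_` counter that `Reset()` now clears as well. A minimal standalone sketch of that shape, using a hypothetical `CategoryList` stand-in rather than the real V8 types:

```cpp
// Minimal sketch of the FreeList shape touched by this patch; CategoryList is
// a hypothetical stand-in, not the V8 free-list category API.
#include <cstdint>

class CategoryList {
 public:
  void Reset() { available_ = 0; }
  // Move the other list's bytes into this one and report how much moved.
  intptr_t Concatenate(CategoryList* other) {
    intptr_t moved = other->available_;
    available_ += moved;
    other->available_ = 0;
    return moved;
  }

 private:
  intptr_t available_ = 0;
};

class SketchFreeList {
 public:
  void Reset() {
    small_list_.Reset();
    medium_list_.Reset();
    large_list_.Reset();
    huge_list_.Reset();
    // New in this patch: allocation bytes not yet reported to the
    // incremental marker are also forgotten on reset.
    unreported_allocation_ = 0;
  }

 private:
  CategoryList small_list_, medium_list_, large_list_, huge_list_;
  intptr_t unreported_allocation_ = 0;
};
```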
| 2206 int FreeList::Free(Address start, int size_in_bytes) { | 2207 int FreeList::Free(Address start, int size_in_bytes) { |
| 2207 if (size_in_bytes == 0) return 0; | 2208 if (size_in_bytes == 0) return 0; |
| 2208 | 2209 |
| 2209 heap_->CreateFillerObjectAt(start, size_in_bytes); | 2210 heap_->CreateFillerObjectAt(start, size_in_bytes); |
| 2210 | 2211 |
| 2211 Page* page = Page::FromAddress(start); | 2212 Page* page = Page::FromAddress(start); |
| 2212 | 2213 |
| (...skipping 127 matching lines...) | |
| 2340 page = Page::FromAddress(node->address()); | 2341 page = Page::FromAddress(node->address()); |
| 2341 page->add_available_in_large_free_list(-(*node_size)); | 2342 page->add_available_in_large_free_list(-(*node_size)); |
| 2342 } | 2343 } |
| 2343 } | 2344 } |
| 2344 | 2345 |
| 2345 DCHECK(IsVeryLong() || available() == SumFreeLists()); | 2346 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2346 return node; | 2347 return node; |
| 2347 } | 2348 } |
| 2348 | 2349 |
| 2349 | 2350 |
| | 2351 void PagedSpace::SetTopAndLimit(Address top, Address limit) { |
| | 2352 DCHECK(top == limit || |
| | 2353 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| | 2354 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| | 2355 allocation_info_.set_top(top); |
| | 2356 allocation_info_.set_limit(limit); |
| | 2357 } |
| | 2358 |
| | 2359 |
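The newly added `PagedSpace::SetTopAndLimit()` above installs the bounds of the linear (bump-pointer) allocation area after recording the previous high-water mark. A minimal sketch of how such a top/limit pair is used for fast allocation, assuming a hypothetical `LinearArea` type rather than V8's real `allocation_info_` API:

```cpp
// Minimal bump-pointer sketch; LinearArea and its members are hypothetical
// stand-ins for V8's allocation_info_ top/limit pair, not the real API.
#include <cstddef>
#include <cstdint>

class LinearArea {
 public:
  // Install a new [top, limit) range; (nullptr, nullptr) means "no area".
  void SetTopAndLimit(uint8_t* top, uint8_t* limit) {
    top_ = top;
    limit_ = limit;
  }

  // Bump-pointer allocation: the cheap path used while the area has room.
  // The free list is consulted only when this fails, which is what the
  // DCHECK at the top of FreeList::Allocate() asserts.
  uint8_t* Allocate(size_t size_in_bytes) {
    if (top_ == nullptr || static_cast<size_t>(limit_ - top_) < size_in_bytes)
      return nullptr;
    uint8_t* result = top_;
    top_ += size_in_bytes;
    return result;
  }

 private:
  uint8_t* top_ = nullptr;
  uint8_t* limit_ = nullptr;
};
```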
| 2350 // Allocation on the old space free list. If it succeeds then a new linear | 2360 // Allocation on the old space free list. If it succeeds then a new linear |
| 2351 // allocation space has been set up with the top and limit of the space. If | 2361 // allocation space has been set up with the top and limit of the space. If |
| 2352 // the allocation fails then NULL is returned, and the caller can perform a GC | 2362 // the allocation fails then NULL is returned, and the caller can perform a GC |
| 2353 // or allocate a new page before retrying. | 2363 // or allocate a new page before retrying. |
| 2354 HeapObject* FreeList::Allocate(int size_in_bytes) { | 2364 HeapObject* FreeList::Allocate(int size_in_bytes) { |
| 2355 DCHECK(0 < size_in_bytes); | 2365 DCHECK(0 < size_in_bytes); |
| 2356 DCHECK(size_in_bytes <= kMaxBlockSize); | 2366 DCHECK(size_in_bytes <= kMaxBlockSize); |
| 2357 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | 2367 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
| 2358 // Don't free list allocate if there is linear space available. | 2368 // Don't free list allocate if there is linear space available. |
| 2359 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); | 2369 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
| 2360 | 2370 |
| 2361 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2371 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 2362 // Mark the old linear allocation area with a free space map so it can be | 2372 // Mark the old linear allocation area with a free space map so it can be |
| 2363 // skipped when scanning the heap. This also puts it back in the free list | 2373 // skipped when scanning the heap. This also puts it back in the free list |
| 2364 // if it is big enough. | 2374 // if it is big enough. |
| 2365 owner_->Free(owner_->top(), old_linear_size); | 2375 owner_->Free(owner_->top(), old_linear_size); |
| 2366 | 2376 |
| 2367 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - | |
| 2368 old_linear_size); | |
| 2369 | |
| 2370 int new_node_size = 0; | 2377 int new_node_size = 0; |
| 2371 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2378 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 2372 if (new_node == NULL) { | 2379 if (new_node == NULL) { |
| 2373 owner_->SetTopAndLimit(NULL, NULL); | 2380 owner_->SetTopAndLimit(NULL, NULL); |
| 2374 return NULL; | 2381 return NULL; |
| 2375 } | 2382 } |
| 2376 | 2383 |
| 2377 int bytes_left = new_node_size - size_in_bytes; | 2384 int bytes_left = new_node_size - size_in_bytes; |
| 2378 DCHECK(bytes_left >= 0); | 2385 DCHECK(bytes_left >= 0); |
| 2379 | 2386 |
| 2380 #ifdef DEBUG | 2387 #ifdef DEBUG |
| 2381 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2388 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
| 2382 reinterpret_cast<Object**>(new_node->address())[i] = | 2389 reinterpret_cast<Object**>(new_node->address())[i] = |
| 2383 Smi::FromInt(kCodeZapValue); | 2390 Smi::FromInt(kCodeZapValue); |
| 2384 } | 2391 } |
| 2385 #endif | 2392 #endif |
| 2386 | 2393 |
| 2387 // The old-space-step might have finished sweeping and restarted marking. | 2394 // The old-space-step might have finished sweeping and restarted marking. |
| 2388 // Verify that it did not turn the page of the new node into an evacuation | 2395 // Verify that it did not turn the page of the new node into an evacuation |
| 2389 // candidate. | 2396 // candidate. |
| 2390 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2397 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
| 2391 | 2398 |
| 2392 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2399 // An old-space step will mark more data per byte allocated, because old space |
| | 2400 // allocation is more serious. We don't want the pause to be bigger, so we |
| | 2401 // do marking after a smaller amount of allocation. |
| | 2402 const int kThreshold = IncrementalMarking::kAllocatedThreshold * |
| | 2403 IncrementalMarking::kOldSpaceAllocationMarkingFactor; |
| 2393 | 2404 |
| 2394 // Memory in the linear allocation area is counted as allocated. We may free | 2405 // Memory in the linear allocation area is counted as allocated. We may free |
| 2395 // a little of this again immediately - see below. | 2406 // a little of this again immediately - see below. |
| 2396 owner_->Allocate(new_node_size); | 2407 owner_->Allocate(new_node_size); |
| 2397 | 2408 |
| | 2409 unreported_allocation_ += new_node_size; |
| | 2410 |
| 2398 if (owner_->heap()->inline_allocation_disabled()) { | 2411 if (owner_->heap()->inline_allocation_disabled()) { |
| 2399 // Keep the linear allocation area empty if requested to do so, just | 2412 // Keep the linear allocation area empty if requested to do so, just |
| 2400 // return area back to the free list instead. | 2413 // return area back to the free list instead. |
| 2401 owner_->Free(new_node->address() + size_in_bytes, bytes_left); | 2414 owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
| 2402 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); | 2415 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); |
| 2403 } else if (bytes_left > kThreshold && | 2416 } else if (bytes_left > kThreshold && |
| 2404 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2417 owner_->heap()->incremental_marking()->CanDoSteps()) { |
| 2405 FLAG_incremental_marking_steps) { | |
| 2406 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2418 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| | 2419 |
| 2407 // We don't want to give too large linear areas to the allocator while | 2420 // We don't want to give too large linear areas to the allocator while |
| 2408 // incremental marking is going on, because we won't check again whether | 2421 // incremental marking is going on, because we won't check again whether |
| 2409 // we want to do another increment until the linear area is used up. | 2422 // we want to do another increment until the linear area is used up. |
| 2410 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2423 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
| 2411 new_node_size - size_in_bytes - linear_size); | 2424 new_node_size - size_in_bytes - linear_size); |
| 2412 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2425 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2413 new_node->address() + size_in_bytes + linear_size); | 2426 new_node->address() + size_in_bytes + linear_size); |
| 2414 } else if (bytes_left > 0) { | 2427 // It is important that we are done updating top and limit before we call |
| 2415 // Normally we give the rest of the node to the allocator as its new | 2428 // this. |
|
Hannes Payer (out of office) 2015/04/22 14:06:21
Can you say why in the comment to make it clear?
Erik Corry Chromium.org 2015/06/29 14:45:30
Done.
| |
| 2416 // linear allocation area. | 2429 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes + |
| 2417 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2430 linear_size); |
| 2418 new_node->address() + new_node_size); | 2431 unreported_allocation_ = 0; |
| 2419 } else { | 2432 } else { |
|
Hannes Payer (out of office) 2015/04/22 14:06:21
Please add the same comment from above also here.
Erik Corry Chromium.org 2015/06/29 14:45:30
The comment is there, just before the call to OldSpaceStep.
| |
| 2420 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2433 if (bytes_left > 0) { |
| 2421 // are zero. | 2434 // Normally we give the rest of the node to the allocator as its new |
| 2422 owner_->SetTopAndLimit(NULL, NULL); | 2435 // linear allocation area. |
| | 2436 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| | 2437 new_node->address() + new_node_size); |
| | 2438 } else { |
| | 2439 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| | 2440 // are zero. |
| | 2441 owner_->SetTopAndLimit(NULL, NULL); |
| | 2442 } |
| | 2443 if (unreported_allocation_ > kThreshold) { |
| | 2444 // This may start the incremental marker, or do a little work if it's |
| | 2445 // already started. It is important that we are finished updating top |
| | 2446 // and limit before we call this. |
| | 2447 owner_->heap()->incremental_marking()->OldSpaceStep( |
| | 2448 Min(kThreshold, unreported_allocation_)); |
| | 2449 unreported_allocation_ = 0; |
| | 2450 } |
| 2423 } | 2451 } |
| 2424 | 2452 |
| 2425 return new_node; | 2453 return new_node; |
| 2426 } | 2454 } |
| 2427 | 2455 |
| 2428 | 2456 |
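The restructured `FreeList::Allocate()` above removes the unconditional `OldSpaceStep()` call at the start and instead reports allocation to the incremental marker only after top and limit have been updated, accumulating bytes in `unreported_allocation_` and capping each reported step so one large allocation does not turn into one long pause. A standalone sketch of that reporting policy, with hypothetical names (`AllocationReporter`, `MarkerStep`) and an illustrative threshold value rather than the V8 constants:

```cpp
// Sketch of the deferred-reporting policy only; MarkerStep and kStepThreshold
// are hypothetical stand-ins, and the threshold value is illustrative.
#include <algorithm>
#include <cstdint>

// Stand-in for IncrementalMarking::OldSpaceStep(): performs a slice of
// marking work proportional to the bytes reported.
void MarkerStep(intptr_t bytes_allocated) { (void)bytes_allocated; }

const intptr_t kStepThreshold = 32 * 1024;  // illustrative value

class AllocationReporter {
 public:
  // Called after each free-list allocation, once top and limit are final.
  void RecordAllocation(intptr_t bytes) {
    unreported_allocation_ += bytes;
    if (unreported_allocation_ > kStepThreshold) {
      // Report at most one threshold's worth per step so a single very large
      // allocation cannot force one very long marking pause; the marker
      // catches up over subsequent steps instead.
      MarkerStep(std::min(kStepThreshold, unreported_allocation_));
      unreported_allocation_ = 0;
    }
  }

 private:
  intptr_t unreported_allocation_ = 0;
};
```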
| 2429 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2457 intptr_t FreeList::EvictFreeListItems(Page* p) { |
| 2430 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); | 2458 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
| 2431 p->set_available_in_huge_free_list(0); | 2459 p->set_available_in_huge_free_list(0); |
| 2432 | 2460 |
| (...skipping 466 matching lines...) | |
| 2899 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); | 2927 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); |
| 2900 | 2928 |
| 2901 if (Heap::ShouldZapGarbage()) { | 2929 if (Heap::ShouldZapGarbage()) { |
| 2902 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2930 // Make the object consistent so the heap can be verified in OldSpaceStep. |
| 2903 // We only need to do this in debug builds or if verify_heap is on. | 2931 // We only need to do this in debug builds or if verify_heap is on. |
| 2904 reinterpret_cast<Object**>(object->address())[0] = | 2932 reinterpret_cast<Object**>(object->address())[0] = |
| 2905 heap()->fixed_array_map(); | 2933 heap()->fixed_array_map(); |
| 2906 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2934 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
| 2907 } | 2935 } |
| 2908 | 2936 |
| 2909 heap()->incremental_marking()->OldSpaceStep(object_size); | 2937 // We would like to tell the incremental marker to do a lot of work, since |
| | 2938 // we just made a large allocation in old space, but that might cause a huge |
| | 2939 // pause. Underreporting here may cause the marker to speed up because it |
| | 2940 // will perceive that it is not keeping up with allocation. Although this |
| | 2941 // causes some big incremental marking steps they are not as big as this one |
| | 2942 // might have been. In testing, a very large pause was divided up into about |
| | 2943 // 12 parts. |
| | 2944 const int kThreshold = IncrementalMarking::kAllocatedThreshold * |
| | 2945 IncrementalMarking::kOldSpaceAllocationMarkingFactor; |
| | 2946 heap()->incremental_marking()->OldSpaceStep(kThreshold); |
| 2910 return object; | 2947 return object; |
| 2911 } | 2948 } |
| 2912 | 2949 |
| 2913 | 2950 |
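In the large-object path above, the patch stops passing the full `object_size` to `OldSpaceStep()` and reports only a capped amount; the marker under-counts allocation, notices it is falling behind, and speeds up, splitting what would have been one very long pause into several shorter ones (about 12 in the author's testing). A sketch of the idea, with hypothetical names and an illustrative constant, not the V8 API:

```cpp
// Sketch of under-reporting a large allocation; names and the constant are
// hypothetical stand-ins.
#include <cstdint>

// Stand-in for IncrementalMarking::OldSpaceStep().
void MarkingStep(intptr_t bytes) { (void)bytes; }

// Stand-in for kAllocatedThreshold * kOldSpaceAllocationMarkingFactor.
const intptr_t kCappedStep = 32 * 1024;

void ReportLargeAllocation(intptr_t object_size) {
  // Deliberately ignore the real size: reporting, say, a multi-megabyte
  // object in one go would trigger one very long marking step. Reporting
  // only the capped amount makes the marker think it is behind, so it
  // increases its pace and spreads the same work over shorter pauses.
  (void)object_size;
  MarkingStep(kCappedStep);
}
```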
| 2914 size_t LargeObjectSpace::CommittedPhysicalMemory() { | 2951 size_t LargeObjectSpace::CommittedPhysicalMemory() { |
| 2915 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 2952 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 2916 size_t size = 0; | 2953 size_t size = 0; |
| 2917 LargePage* current = first_page_; | 2954 LargePage* current = first_page_; |
| 2918 while (current != NULL) { | 2955 while (current != NULL) { |
| 2919 size += current->CommittedPhysicalMemory(); | 2956 size += current->CommittedPhysicalMemory(); |
| (...skipping 202 matching lines...) | |
| 3122 object->ShortPrint(); | 3159 object->ShortPrint(); |
| 3123 PrintF("\n"); | 3160 PrintF("\n"); |
| 3124 } | 3161 } |
| 3125 printf(" --------------------------------------\n"); | 3162 printf(" --------------------------------------\n"); |
| 3126 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3163 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3127 } | 3164 } |
| 3128 | 3165 |
| 3129 #endif // DEBUG | 3166 #endif // DEBUG |
| 3130 } | 3167 } |
| 3131 } // namespace v8::internal | 3168 } // namespace v8::internal |