OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
(...skipping 2187 matching lines...) |
2198 free_bytes += huge_list_.Concatenate(free_list->huge_list()); | 2198 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
2199 return free_bytes; | 2199 return free_bytes; |
2200 } | 2200 } |
2201 | 2201 |
2202 | 2202 |
2203 void FreeList::Reset() { | 2203 void FreeList::Reset() { |
2204 small_list_.Reset(); | 2204 small_list_.Reset(); |
2205 medium_list_.Reset(); | 2205 medium_list_.Reset(); |
2206 large_list_.Reset(); | 2206 large_list_.Reset(); |
2207 huge_list_.Reset(); | 2207 huge_list_.Reset(); |
| 2208 unreported_allocation_ = 0; |
2208 } | 2209 } |
2209 | 2210 |
2210 | 2211 |
2211 int FreeList::Free(Address start, int size_in_bytes) { | 2212 int FreeList::Free(Address start, int size_in_bytes) { |
2212 if (size_in_bytes == 0) return 0; | 2213 if (size_in_bytes == 0) return 0; |
2213 | 2214 |
2214 heap_->CreateFillerObjectAt(start, size_in_bytes); | 2215 heap_->CreateFillerObjectAt(start, size_in_bytes); |
2215 | 2216 |
2216 Page* page = Page::FromAddress(start); | 2217 Page* page = Page::FromAddress(start); |
2217 | 2218 |
(...skipping 127 matching lines...) |
2345 page = Page::FromAddress(node->address()); | 2346 page = Page::FromAddress(node->address()); |
2346 page->add_available_in_large_free_list(-(*node_size)); | 2347 page->add_available_in_large_free_list(-(*node_size)); |
2347 } | 2348 } |
2348 } | 2349 } |
2349 | 2350 |
2350 DCHECK(IsVeryLong() || available() == SumFreeLists()); | 2351 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2351 return node; | 2352 return node; |
2352 } | 2353 } |
2353 | 2354 |
2354 | 2355 |
| 2356 void PagedSpace::SetTopAndLimit(Address top, Address limit) { |
| 2357 DCHECK(top == limit || |
| 2358 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| 2359 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 2360 allocation_info_.set_top(top); |
| 2361 allocation_info_.set_limit(limit); |
| 2362 } |
| 2363 |
| 2364 |
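SetTopAndLimit, added above, installs the bounds of the linear (bump-pointer) allocation area; its DCHECK permits either an empty area (top == limit) or one that stays within a single page. A minimal sketch of how such an area is consumed follows; it is not V8's actual fast path, TryBumpAllocate is a hypothetical name, and Address is assumed to be V8's byte-pointer typedef:

    #include <cstddef>

    typedef unsigned char* Address;  // stand-in for V8's byte-pointer typedef

    // Minimal sketch: bump-pointer allocation against the [top, limit)
    // area that SetTopAndLimit installs.
    inline Address TryBumpAllocate(Address* top, Address limit,
                                   int size_in_bytes) {
      Address current = *top;
      if (limit - current < size_in_bytes) {
        return NULL;  // Area exhausted; the caller falls back to the free list.
      }
      *top = current + size_in_bytes;  // Bump past the newly allocated object.
      return current;
    }

FreeList::Allocate below relies on exactly this scheme: when the free list hands out a node larger than requested, the unused tail becomes the next [top, limit) area.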
2355 // Allocation on the old space free list. If it succeeds then a new linear | 2365 // Allocation on the old space free list. If it succeeds then a new linear |
2356 // allocation space has been set up with the top and limit of the space. If | 2366 // allocation space has been set up with the top and limit of the space. If |
2357 // the allocation fails then NULL is returned, and the caller can perform a GC | 2367 // the allocation fails then NULL is returned, and the caller can perform a GC |
2358 // or allocate a new page before retrying. | 2368 // or allocate a new page before retrying. |
2359 HeapObject* FreeList::Allocate(int size_in_bytes) { | 2369 HeapObject* FreeList::Allocate(int size_in_bytes) { |
2360 DCHECK(0 < size_in_bytes); | 2370 DCHECK(0 < size_in_bytes); |
2361 DCHECK(size_in_bytes <= kMaxBlockSize); | 2371 DCHECK(size_in_bytes <= kMaxBlockSize); |
2362 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | 2372 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
2363 // Don't free list allocate if there is linear space available. | 2373 // Don't free list allocate if there is linear space available. |
2364 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); | 2374 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
2365 | 2375 |
2366 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2376 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
2367 // Mark the old linear allocation area with a free space map so it can be | 2377 // Mark the old linear allocation area with a free space map so it can be |
2368 // skipped when scanning the heap. This also puts it back in the free list | 2378 // skipped when scanning the heap. This also puts it back in the free list |
2369 // if it is big enough. | 2379 // if it is big enough. |
2370 owner_->Free(owner_->top(), old_linear_size); | 2380 owner_->Free(owner_->top(), old_linear_size); |
2371 | 2381 |
2372 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - | |
2373 old_linear_size); | |
2374 | |
2375 int new_node_size = 0; | 2382 int new_node_size = 0; |
2376 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2383 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
2377 if (new_node == NULL) { | 2384 if (new_node == NULL) { |
2378 owner_->SetTopAndLimit(NULL, NULL); | 2385 owner_->SetTopAndLimit(NULL, NULL); |
2379 return NULL; | 2386 return NULL; |
2380 } | 2387 } |
2381 | 2388 |
2382 int bytes_left = new_node_size - size_in_bytes; | 2389 int bytes_left = new_node_size - size_in_bytes; |
2383 DCHECK(bytes_left >= 0); | 2390 DCHECK(bytes_left >= 0); |
2384 | 2391 |
2385 #ifdef DEBUG | 2392 #ifdef DEBUG |
2386 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2393 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
2387 reinterpret_cast<Object**>(new_node->address())[i] = | 2394 reinterpret_cast<Object**>(new_node->address())[i] = |
2388 Smi::FromInt(kCodeZapValue); | 2395 Smi::FromInt(kCodeZapValue); |
2389 } | 2396 } |
2390 #endif | 2397 #endif |
2391 | 2398 |
2392 // The old-space-step might have finished sweeping and restarted marking. | 2399 // The old-space-step might have finished sweeping and restarted marking. |
2393 // Verify that it did not turn the page of the new node into an evacuation | 2400 // Verify that it did not turn the page of the new node into an evacuation |
2394 // candidate. | 2401 // candidate. |
2395 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2402 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
2396 | 2403 |
2397 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2404 // An old-space step will mark more data per byte allocated, because old space |
| 2405 // allocation is more serious. We don't want the pause to be bigger, so we |
| 2406 // do marking after a smaller amount of allocation. |
| 2407 const int kThreshold = IncrementalMarking::kAllocatedThreshold * |
| 2408 IncrementalMarking::kOldSpaceAllocationMarkingFactor; |
2398 | 2409 |
2399 // Memory in the linear allocation area is counted as allocated. We may free | 2410 // Memory in the linear allocation area is counted as allocated. We may free |
2400 // a little of this again immediately - see below. | 2411 // a little of this again immediately - see below. |
2401 owner_->Allocate(new_node_size); | 2412 owner_->Allocate(new_node_size); |
2402 | 2413 |
| 2414 unreported_allocation_ += new_node_size; |
| 2415 |
2403 if (owner_->heap()->inline_allocation_disabled()) { | 2416 if (owner_->heap()->inline_allocation_disabled()) { |
2404 // Keep the linear allocation area empty if requested to do so; just | 2417 // Keep the linear allocation area empty if requested to do so; just |
2405 // return the area to the free list instead. | 2418 // return the area to the free list instead. |
2406 owner_->Free(new_node->address() + size_in_bytes, bytes_left); | 2419 owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
2407 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); | 2420 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); |
2408 } else if (bytes_left > kThreshold && | 2421 } else if (bytes_left > kThreshold && |
2409 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2422 owner_->heap()->incremental_marking()->CanDoSteps()) { |
2410 FLAG_incremental_marking_steps) { | |
2411 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2423 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 2424 |
2412 // We don't want to give too large linear areas to the allocator while | 2425 // We don't want to give too large linear areas to the allocator while |
2413 // incremental marking is going on, because we won't check again whether | 2426 // incremental marking is going on, because we won't check again whether |
2414 // we want to do another increment until the linear area is used up. | 2427 // we want to do another increment until the linear area is used up. |
2415 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2428 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
2416 new_node_size - size_in_bytes - linear_size); | 2429 new_node_size - size_in_bytes - linear_size); |
2417 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2430 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
2418 new_node->address() + size_in_bytes + linear_size); | 2431 new_node->address() + size_in_bytes + linear_size); |
2419 } else if (bytes_left > 0) { | 2432 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes + |
2420 // Normally we give the rest of the node to the allocator as its new | 2433 linear_size); |
2421 // linear allocation area. | 2434 unreported_allocation_ = 0; |
2422 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | |
2423 new_node->address() + new_node_size); | |
2424 } else { | 2435 } else { |
2425 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2436 if (unreported_allocation_ > kThreshold) { |
2426 // are zero. | 2437 // This may start the incremental marker, or do a little work if it's |
2427 owner_->SetTopAndLimit(NULL, NULL); | 2438 // already started. |
| 2439 owner_->heap()->incremental_marking()->OldSpaceStep( |
| 2440 Min(kThreshold, unreported_allocation_)); |
| 2441 unreported_allocation_ = 0; |
| 2442 } |
| 2443 if (bytes_left > 0) { |
| 2444 // Normally we give the rest of the node to the allocator as its new |
| 2445 // linear allocation area. |
| 2446 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2447 new_node->address() + new_node_size); |
| 2448 } else { |
| 2449 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| 2450 // is zero. |
| 2451 owner_->SetTopAndLimit(NULL, NULL); |
| 2452 } |
2428 } | 2453 } |
2429 | 2454 |
2430 return new_node; | 2455 return new_node; |
2431 } | 2456 } |
2432 | 2457 |
2433 | 2458 |
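The restructured tail of Allocate above replaces the old unconditional OldSpaceStep call with throttled reporting: each free-list allocation is accumulated in the new unreported_allocation_ counter, and the marker is stepped only when a fresh linear area is handed out or the counter crosses kThreshold. A self-contained sketch of that accumulate-and-report pattern, with hypothetical names (AllocationReporter, OnAllocation, DoMarkingStep) standing in for the real marker interface:

    #include <algorithm>

    // Sketch only; mirrors the accumulate-and-report logic above under the
    // assumption that the marker exposes a single step-by-bytes callback.
    class AllocationReporter {
     public:
      explicit AllocationReporter(int threshold)
          : threshold_(threshold), unreported_(0) {}

      // Called after every free-list allocation of `bytes`.
      void OnAllocation(int bytes) {
        unreported_ += bytes;
        if (unreported_ > threshold_) {
          // Cap the reported amount so one huge allocation cannot become
          // one huge marking step (cf. Min(kThreshold, ...) above).
          DoMarkingStep(std::min(threshold_, unreported_));
          unreported_ = 0;
        }
      }

     private:
      void DoMarkingStep(int allocated_bytes) {
        // The real marker would do an amount of marking work proportional
        // to allocated_bytes here.
      }
      int threshold_;
      int unreported_;
    };
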
2434 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2459 intptr_t FreeList::EvictFreeListItems(Page* p) { |
2435 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); | 2460 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
2436 p->set_available_in_huge_free_list(0); | 2461 p->set_available_in_huge_free_list(0); |
2437 | 2462 |
(...skipping 475 matching lines...) |
2913 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); | 2938 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); |
2914 | 2939 |
2915 if (Heap::ShouldZapGarbage()) { | 2940 if (Heap::ShouldZapGarbage()) { |
2916 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2941 // Make the object consistent so the heap can be verified in OldSpaceStep. |
2917 // We only need to do this in debug builds or if verify_heap is on. | 2942 // We only need to do this in debug builds or if verify_heap is on. |
2918 reinterpret_cast<Object**>(object->address())[0] = | 2943 reinterpret_cast<Object**>(object->address())[0] = |
2919 heap()->fixed_array_map(); | 2944 heap()->fixed_array_map(); |
2920 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2945 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
2921 } | 2946 } |
2922 | 2947 |
2923 heap()->incremental_marking()->OldSpaceStep(object_size); | 2948 // We would like to tell the incremental marker to do a lot of work, since |
| 2949 // we just made a large allocation in old space, but that might cause a huge |
| 2950 // pause. Underreporting here may cause the marker to speed up because it |
| 2951 // will perceive that it is not keeping up with allocation. Although this |
| 2952 // causes some big incremental marking steps, they are not as big as this |
| 2953 // one might have been. In testing, a very large pause was divided into |
| 2954 // about 12 parts. |
| 2955 const int kThreshold = IncrementalMarking::kAllocatedThreshold * |
| 2956 IncrementalMarking::kOldSpaceAllocationMarkingFactor; |
| 2957 heap()->incremental_marking()->OldSpaceStep(kThreshold); |
2924 return object; | 2958 return object; |
2925 } | 2959 } |
2926 | 2960 |
2927 | 2961 |
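The comment above motivates underreporting: pass the marker a capped amount rather than the full object size, so its pacing logic perceives it is falling behind, speeds up, and spreads the work over later steps. The arithmetic below uses stand-in values chosen only so the split lands near the "about 12 parts" mentioned in the comment; V8's actual constants may differ:

    #include <cstdio>

    int main() {
      const int KB = 1024;
      const int MB = 1024 * KB;
      // Hypothetical values: one 3 MB large object, 256 KB reported per step.
      const int object_size = 3 * MB;
      const int kThreshold = 256 * KB;
      // Reporting only kThreshold instead of object_size means the marker
      // catches up over roughly this many subsequent steps instead of one:
      std::printf("work spread over ~%d steps\n", object_size / kThreshold);
      return 0;
    }
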
2928 size_t LargeObjectSpace::CommittedPhysicalMemory() { | 2962 size_t LargeObjectSpace::CommittedPhysicalMemory() { |
2929 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 2963 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
2930 size_t size = 0; | 2964 size_t size = 0; |
2931 LargePage* current = first_page_; | 2965 LargePage* current = first_page_; |
2932 while (current != NULL) { | 2966 while (current != NULL) { |
2933 size += current->CommittedPhysicalMemory(); | 2967 size += current->CommittedPhysicalMemory(); |
(...skipping 202 matching lines...) |
3136 object->ShortPrint(); | 3170 object->ShortPrint(); |
3137 PrintF("\n"); | 3171 PrintF("\n"); |
3138 } | 3172 } |
3139 printf(" --------------------------------------\n"); | 3173 printf(" --------------------------------------\n"); |
3140 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3174 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3141 } | 3175 } |
3142 | 3176 |
3143 #endif // DEBUG | 3177 #endif // DEBUG |
3144 } | 3178 } |
3145 } // namespace v8::internal | 3179 } // namespace v8::internal |