| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/platform/platform.h" | 7 #include "src/base/platform/platform.h" |
| 8 #include "src/full-codegen.h" | 8 #include "src/full-codegen.h" |
| 9 #include "src/macro-assembler.h" | 9 #include "src/macro-assembler.h" |
| 10 #include "src/mark-compact.h" | 10 #include "src/mark-compact.h" |
| (...skipping 918 matching lines...) |
| 929 // ----------------------------------------------------------------------------- | 929 // ----------------------------------------------------------------------------- |
| 930 // PagedSpace implementation | 930 // PagedSpace implementation |
| 931 | 931 |
| 932 PagedSpace::PagedSpace(Heap* heap, | 932 PagedSpace::PagedSpace(Heap* heap, |
| 933 intptr_t max_capacity, | 933 intptr_t max_capacity, |
| 934 AllocationSpace id, | 934 AllocationSpace id, |
| 935 Executability executable) | 935 Executability executable) |
| 936 : Space(heap, id, executable), | 936 : Space(heap, id, executable), |
| 937 free_list_(this), | 937 free_list_(this), |
| 938 is_iterable_(true), | 938 is_iterable_(true), |
| 939 is_swept_concurrently_(false), |
| 939 unswept_free_bytes_(0), | 940 unswept_free_bytes_(0), |
| 940 end_of_unswept_pages_(NULL) { | 941 end_of_unswept_pages_(NULL) { |
| 941 if (id == CODE_SPACE) { | 942 if (id == CODE_SPACE) { |
| 942 area_size_ = heap->isolate()->memory_allocator()-> | 943 area_size_ = heap->isolate()->memory_allocator()-> |
| 943 CodePageAreaSize(); | 944 CodePageAreaSize(); |
| 944 } else { | 945 } else { |
| 945 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 946 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 946 } | 947 } |
| 947 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 948 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 948 * AreaSize(); | 949 * AreaSize(); |
| (...skipping 1590 matching lines...) |
| 2539 // This counter will be increased for pages which will be swept by the | 2540 // This counter will be increased for pages which will be swept by the |
| 2540 // sweeper threads. | 2541 // sweeper threads. |
| 2541 unswept_free_bytes_ = 0; | 2542 unswept_free_bytes_ = 0; |
| 2542 | 2543 |
| 2543 // Clear the free list before a full GC---it will be rebuilt afterward. | 2544 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2544 free_list_.Reset(); | 2545 free_list_.Reset(); |
| 2545 } | 2546 } |
| 2546 | 2547 |
| 2547 | 2548 |
| 2548 intptr_t PagedSpace::SizeOfObjects() { | 2549 intptr_t PagedSpace::SizeOfObjects() { |
| 2549 ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() || | 2550 ASSERT(heap()->mark_compact_collector()-> |
| 2550 (unswept_free_bytes_ == 0)); | 2551 IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0)); |
| 2551 return Size() - unswept_free_bytes_ - (limit() - top()); | 2552 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2552 } | 2553 } |
| 2553 | 2554 |
| 2554 | 2555 |
| 2555 // After we have booted, we have created a map which represents free space | 2556 // After we have booted, we have created a map which represents free space |
| 2556 // on the heap. If there was already a free list then the elements on it | 2557 // on the heap. If there was already a free list then the elements on it |
| 2557 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2558 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2558 // fix them. | 2559 // fix them. |
| 2559 void PagedSpace::RepairFreeListsAfterBoot() { | 2560 void PagedSpace::RepairFreeListsAfterBoot() { |
| 2560 free_list_.RepairLists(heap()); | 2561 free_list_.RepairLists(heap()); |
| 2561 } | 2562 } |
| 2562 | 2563 |
| 2563 | 2564 |
| 2564 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { | 2565 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2565 if (allocation_info_.top() >= allocation_info_.limit()) return; | 2566 if (allocation_info_.top() >= allocation_info_.limit()) return; |
| 2566 | 2567 |
| 2567 if (Page::FromAllocationTop(allocation_info_.top())-> | 2568 if (Page::FromAllocationTop(allocation_info_.top())-> |
| 2568 IsEvacuationCandidate()) { | 2569 IsEvacuationCandidate()) { |
| 2569 // Create filler object to keep page iterable if it was iterable. | 2570 // Create filler object to keep page iterable if it was iterable. |
| 2570 int remaining = | 2571 int remaining = |
| 2571 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); | 2572 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); |
| 2572 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); | 2573 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); |
| 2573 | 2574 |
| 2574 allocation_info_.set_top(NULL); | 2575 allocation_info_.set_top(NULL); |
| 2575 allocation_info_.set_limit(NULL); | 2576 allocation_info_.set_limit(NULL); |
| 2576 } | 2577 } |
| 2577 } | 2578 } |
| 2578 | 2579 |
| 2579 | 2580 |
| 2580 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( | 2581 HeapObject* PagedSpace::EnsureSweepingProgress( |
| 2581 int size_in_bytes) { | 2582 int size_in_bytes) { |
| 2582 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2583 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2583 if (collector->sweeping_in_progress()) { | 2584 |
| 2585 if (collector->IsConcurrentSweepingInProgress(this)) { |
| 2586 // If sweeping is still in progress try to sweep pages on the main thread. |
| 2587 int free_chunk = |
| 2588 collector->SweepInParallel(this, size_in_bytes); |
| 2589 if (free_chunk >= size_in_bytes) { |
| 2590 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2591 // We should be able to allocate an object here since we just freed that |
| 2592 // much memory. |
| 2593 ASSERT(object != NULL); |
| 2594 if (object != NULL) return object; |
| 2595 } |
| 2596 |
| 2584 // Wait for the sweeper threads here and complete the sweeping phase. | 2597 // Wait for the sweeper threads here and complete the sweeping phase. |
| 2585 collector->EnsureSweepingCompleted(); | 2598 collector->WaitUntilSweepingCompleted(); |
| 2586 | 2599 |
| 2587 // After waiting for the sweeper threads, there may be new free-list | 2600 // After waiting for the sweeper threads, there may be new free-list |
| 2588 // entries. | 2601 // entries. |
| 2589 return free_list_.Allocate(size_in_bytes); | 2602 return free_list_.Allocate(size_in_bytes); |
| 2590 } | 2603 } |
| 2591 return NULL; | 2604 return NULL; |
| 2592 } | 2605 } |
| 2593 | 2606 |
| 2594 | 2607 |
| 2595 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 2608 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2596 // Allocation in this space has failed. | 2609 // Allocation in this space has failed. |
| 2597 | 2610 |
| 2611 // If sweeper threads are active, try to re-fill the free-lists. |
| 2598 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2612 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2599 // Sweeping is still in progress. | 2613 if (collector->IsConcurrentSweepingInProgress(this)) { |
| 2600 if (collector->sweeping_in_progress()) { | |
| 2601 // First try to refill the free-list, concurrent sweeper threads | |
| 2602 // may have freed some objects in the meantime. | |
| 2603 collector->RefillFreeList(this); | 2614 collector->RefillFreeList(this); |
| 2604 | 2615 |
| 2605 // Retry the free list allocation. | 2616 // Retry the free list allocation. |
| 2606 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2617 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2607 if (object != NULL) return object; | 2618 if (object != NULL) return object; |
| 2608 | |
| 2609 // If sweeping is still in progress try to sweep pages on the main thread. | |
| 2610 int free_chunk = | |
| 2611 collector->SweepInParallel(this, size_in_bytes); | |
| 2612 collector->RefillFreeList(this); | |
| 2613 if (free_chunk >= size_in_bytes) { | |
| 2614 HeapObject* object = free_list_.Allocate(size_in_bytes); | |
| 2615 // We should be able to allocate an object here since we just freed that | |
| 2616 // much memory. | |
| 2617 ASSERT(object != NULL); | |
| 2618 if (object != NULL) return object; | |
| 2619 } | |
| 2620 } | 2619 } |
| 2621 | 2620 |
| 2622 // Free list allocation failed and there is no next page. Fail if we have | 2621 // Free list allocation failed and there is no next page. Fail if we have |
| 2623 // hit the old generation size limit that should cause a garbage | 2622 // hit the old generation size limit that should cause a garbage |
| 2624 // collection. | 2623 // collection. |
| 2625 if (!heap()->always_allocate() | 2624 if (!heap()->always_allocate() |
| 2626 && heap()->OldGenerationAllocationLimitReached()) { | 2625 && heap()->OldGenerationAllocationLimitReached()) { |
| 2627 // If sweeper threads are active, wait for them at that point and steal | 2626 // If sweeper threads are active, wait for them at that point and steal |
| 2628 // elements from their free-lists. | 2627 // elements from their free-lists. |
| 2629 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2628 HeapObject* object = EnsureSweepingProgress(size_in_bytes); |
| 2630 if (object != NULL) return object; | 2629 if (object != NULL) return object; |
| 2631 } | 2630 } |
| 2632 | 2631 |
| 2633 // Try to expand the space and allocate in the new next page. | 2632 // Try to expand the space and allocate in the new next page. |
| 2634 if (Expand()) { | 2633 if (Expand()) { |
| 2635 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); | 2634 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); |
| 2636 return free_list_.Allocate(size_in_bytes); | 2635 return free_list_.Allocate(size_in_bytes); |
| 2637 } | 2636 } |
| 2638 | 2637 |
| 2639 // If sweeper threads are active, wait for them at that point and steal | 2638 // If sweeper threads are active, wait for them at that point and steal |
| 2640 // elements from their free-lists. Allocation may still fail there, which | 2639 // elements from their free-lists. Allocation may still fail there, which |
| 2641 // would indicate that there is not enough memory for the given allocation. | 2640 // would indicate that there is not enough memory for the given allocation. |
| 2642 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2641 return EnsureSweepingProgress(size_in_bytes); |
| 2643 } | 2642 } |
| 2644 | 2643 |
| 2645 | 2644 |
| 2646 #ifdef DEBUG | 2645 #ifdef DEBUG |
| 2647 void PagedSpace::ReportCodeStatistics(Isolate* isolate) { | 2646 void PagedSpace::ReportCodeStatistics(Isolate* isolate) { |
| 2648 CommentStatistic* comments_statistics = | 2647 CommentStatistic* comments_statistics = |
| 2649 isolate->paged_space_comments_statistics(); | 2648 isolate->paged_space_comments_statistics(); |
| 2650 ReportCodeKindStatistics(isolate->code_kind_statistics()); | 2649 ReportCodeKindStatistics(isolate->code_kind_statistics()); |
| 2651 PrintF("Code comment statistics (\" [ comment-txt : size/ " | 2650 PrintF("Code comment statistics (\" [ comment-txt : size/ " |
| 2652 "count (average)\"):\n"); | 2651 "count (average)\"):\n"); |
| (...skipping 497 matching lines...) |
| 3150 object->ShortPrint(); | 3149 object->ShortPrint(); |
| 3151 PrintF("\n"); | 3150 PrintF("\n"); |
| 3152 } | 3151 } |
| 3153 printf(" --------------------------------------\n"); | 3152 printf(" --------------------------------------\n"); |
| 3154 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3153 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3155 } | 3154 } |
| 3156 | 3155 |
| 3157 #endif // DEBUG | 3156 #endif // DEBUG |
| 3158 | 3157 |
| 3159 } } // namespace v8::internal | 3158 } } // namespace v8::internal |