| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 935 matching lines...) |
| 946 // ----------------------------------------------------------------------------- | 946 // ----------------------------------------------------------------------------- |
| 947 // PagedSpace implementation | 947 // PagedSpace implementation |
| 948 | 948 |
| 949 PagedSpace::PagedSpace(Heap* heap, | 949 PagedSpace::PagedSpace(Heap* heap, |
| 950 intptr_t max_capacity, | 950 intptr_t max_capacity, |
| 951 AllocationSpace id, | 951 AllocationSpace id, |
| 952 Executability executable) | 952 Executability executable) |
| 953 : Space(heap, id, executable), | 953 : Space(heap, id, executable), |
| 954 free_list_(this), | 954 free_list_(this), |
| 955 was_swept_conservatively_(false), | 955 was_swept_conservatively_(false), |
| 956 first_unswept_page_(Page::FromAddress(NULL)), | |
| 957 unswept_free_bytes_(0) { | 956 unswept_free_bytes_(0) { |
| 958 if (id == CODE_SPACE) { | 957 if (id == CODE_SPACE) { |
| 959 area_size_ = heap->isolate()->memory_allocator()-> | 958 area_size_ = heap->isolate()->memory_allocator()-> |
| 960 CodePageAreaSize(); | 959 CodePageAreaSize(); |
| 961 } else { | 960 } else { |
| 962 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 961 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 963 } | 962 } |
| 964 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 963 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 965 * AreaSize(); | 964 * AreaSize(); |
| 966 accounting_stats_.Clear(); | 965 accounting_stats_.Clear(); |
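Aside on the capacity math in this hunk: max_capacity_ counts only the usable object area, so the raw byte budget is first rounded down to whole pages and the page count is then multiplied by the per-page area. A worked example with made-up constants (the real values live in spaces.h; these are assumptions for illustration only):

    #include <cstdint>
    #include <cstdio>

    const intptr_t kPageSize = 1 << 20;           // hypothetical: 1 MB pages
    const intptr_t kObjectStartOffset = 32 << 10; // hypothetical: 32 KB page header
    const intptr_t kAreaSize = kPageSize - kObjectStartOffset;

    // Same rounding as V8's RoundDown, for non-negative inputs.
    intptr_t RoundDown(intptr_t x, intptr_t m) { return x - (x % m); }

    int main() {
      intptr_t max_capacity = 5 * kPageSize + 12345;  // not page-aligned
      // Whole pages that fit, times usable object bytes per page.
      intptr_t capped =
          (RoundDown(max_capacity, kPageSize) / kPageSize) * kAreaSize;
      printf("%ld usable bytes across %ld pages\n",
             static_cast<long>(capped), static_cast<long>(capped / kAreaSize));
      return 0;
    }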
| (...skipping 157 matching lines...) |
| 1124 | 1123 |
| 1125 void PagedSpace::IncreaseCapacity(int size) { | 1124 void PagedSpace::IncreaseCapacity(int size) { |
| 1126 accounting_stats_.ExpandSpace(size); | 1125 accounting_stats_.ExpandSpace(size); |
| 1127 } | 1126 } |
| 1128 | 1127 |
| 1129 | 1128 |
| 1130 void PagedSpace::ReleasePage(Page* page, bool unlink) { | 1129 void PagedSpace::ReleasePage(Page* page, bool unlink) { |
| 1131 ASSERT(page->LiveBytes() == 0); | 1130 ASSERT(page->LiveBytes() == 0); |
| 1132 ASSERT(AreaSize() == page->area_size()); | 1131 ASSERT(AreaSize() == page->area_size()); |
| 1133 | 1132 |
| 1134 // Adjust list of unswept pages if the page is the head of the list. | |
| 1135 if (first_unswept_page_ == page) { | |
| 1136 first_unswept_page_ = page->next_page(); | |
| 1137 if (first_unswept_page_ == anchor()) { | |
| 1138 first_unswept_page_ = Page::FromAddress(NULL); | |
| 1139 } | |
| 1140 } | |
| 1141 | |
| 1142 if (page->WasSwept()) { | 1133 if (page->WasSwept()) { |
| 1143 intptr_t size = free_list_.EvictFreeListItems(page); | 1134 intptr_t size = free_list_.EvictFreeListItems(page); |
| 1144 accounting_stats_.AllocateBytes(size); | 1135 accounting_stats_.AllocateBytes(size); |
| 1145 ASSERT_EQ(AreaSize(), static_cast<int>(size)); | 1136 ASSERT_EQ(AreaSize(), static_cast<int>(size)); |
| 1146 } else { | 1137 } else { |
| 1147 DecreaseUnsweptFreeBytes(page); | 1138 DecreaseUnsweptFreeBytes(page); |
| 1148 } | 1139 } |
| 1149 | 1140 |
| 1150 // TODO(hpayer): This check is just used for debugging purposes and | 1141 // TODO(hpayer): This check is just used for debugging purposes and |
| 1151 // should be removed or turned into an assert after investigating the | 1142 // should be removed or turned into an assert after investigating the |
| (...skipping 1396 matching lines...) |
| 2548 | 2539 |
| 2549 | 2540 |
| 2550 // ----------------------------------------------------------------------------- | 2541 // ----------------------------------------------------------------------------- |
| 2551 // OldSpace implementation | 2542 // OldSpace implementation |
| 2552 | 2543 |
| 2553 void PagedSpace::PrepareForMarkCompact() { | 2544 void PagedSpace::PrepareForMarkCompact() { |
| 2554 // We don't have a linear allocation area while sweeping. It will be restored | 2545 // We don't have a linear allocation area while sweeping. It will be restored |
| 2555 // on the first allocation after the sweep. | 2546 // on the first allocation after the sweep. |
| 2556 EmptyAllocationInfo(); | 2547 EmptyAllocationInfo(); |
| 2557 | 2548 |
| 2558 // Stop lazy sweeping and clear marking bits for unswept pages. | 2549 // This counter will be increased for pages which will be swept by the |
| 2559 if (first_unswept_page_ != NULL) { | 2550 // sweeper threads. |
| 2560 Page* p = first_unswept_page_; | |
| 2561 do { | |
| 2562 // Do not use ShouldBeSweptLazily predicate here. | |
| 2563 // New evacuation candidates were selected but they still have | |
| 2564 // to be swept before collection starts. | |
| 2565 if (!p->WasSwept()) { | |
| 2566 Bitmap::Clear(p); | |
| 2567 if (FLAG_gc_verbose) { | |
| 2568 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", | |
| 2569 reinterpret_cast<intptr_t>(p)); | |
| 2570 } | |
| 2571 } | |
| 2572 p = p->next_page(); | |
| 2573 } while (p != anchor()); | |
| 2574 } | |
| 2575 first_unswept_page_ = Page::FromAddress(NULL); | |
| 2576 unswept_free_bytes_ = 0; | 2551 unswept_free_bytes_ = 0; |
| 2577 | 2552 |
| 2578 // Clear the free list before a full GC---it will be rebuilt afterward. | 2553 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2579 free_list_.Reset(); | 2554 free_list_.Reset(); |
| 2580 } | 2555 } |
| 2581 | 2556 |
| 2582 | 2557 |
| 2583 intptr_t PagedSpace::SizeOfObjects() { | 2558 intptr_t PagedSpace::SizeOfObjects() { |
| 2584 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); | 2559 ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() || |
| | 2560 (unswept_free_bytes_ == 0)); |
| 2585 return Size() - unswept_free_bytes_ - (limit() - top()); | 2561 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2586 } | 2562 } |
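Context for the assert change above: SizeOfObjects() reports allocated bytes minus garbage still sitting on unswept pages minus the unused tail of the linear allocation buffer; the new assert simply ties unswept_free_bytes_ to the concurrent sweeper instead of the removed lazy sweeper. A minimal standalone sketch of that invariant, using simplified stand-in fields rather than the V8 types:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the PagedSpace accounting; not the V8 class.
    struct SpaceAccounting {
      intptr_t size;                // bytes accounted as allocated in the space
      intptr_t unswept_free_bytes;  // dead bytes on pages the sweeper has not visited
      intptr_t top, limit;          // linear allocation buffer bounds (top <= limit)

      intptr_t SizeOfObjects() const {
        // Live object bytes: subtract garbage awaiting sweeping and the
        // not-yet-used tail of the current allocation buffer.
        return size - unswept_free_bytes - (limit - top);
      }
    };

    int main() {
      SpaceAccounting s = {1 << 20, 4096, 100, 612};
      assert(s.SizeOfObjects() == (1 << 20) - 4096 - 512);
      return 0;
    }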
| 2587 | 2563 |
| 2588 | 2564 |
| 2589 // After we have booted, we have created a map which represents free space | 2565 // After we have booted, we have created a map which represents free space |
| 2590 // on the heap. If there was already a free list then the elements on it | 2566 // on the heap. If there was already a free list then the elements on it |
| 2591 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2567 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2592 // fix them. | 2568 // fix them. |
| 2593 void PagedSpace::RepairFreeListsAfterBoot() { | 2569 void PagedSpace::RepairFreeListsAfterBoot() { |
| 2594 free_list_.RepairLists(heap()); | 2570 free_list_.RepairLists(heap()); |
| 2595 } | 2571 } |
| 2596 | 2572 |
| 2597 | 2573 |
| 2598 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { | |
| 2599 if (IsLazySweepingComplete()) return true; | |
| 2600 | |
| 2601 intptr_t freed_bytes = 0; | |
| 2602 Page* p = first_unswept_page_; | |
| 2603 do { | |
| 2604 Page* next_page = p->next_page(); | |
| 2605 if (ShouldBeSweptLazily(p)) { | |
| 2606 if (FLAG_gc_verbose) { | |
| 2607 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | |
| 2608 reinterpret_cast<intptr_t>(p)); | |
| 2609 } | |
| 2610 DecreaseUnsweptFreeBytes(p); | |
| 2611 freed_bytes += | |
| 2612 MarkCompactCollector:: | |
| 2613 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( | |
| 2614 this, NULL, p); | |
| 2615 } | |
| 2616 p = next_page; | |
| 2617 } while (p != anchor() && freed_bytes < bytes_to_sweep); | |
| 2618 | |
| 2619 if (p == anchor()) { | |
| 2620 first_unswept_page_ = Page::FromAddress(NULL); | |
| 2621 } else { | |
| 2622 first_unswept_page_ = p; | |
| 2623 } | |
| 2624 | |
| 2625 heap()->FreeQueuedChunks(); | |
| 2626 | |
| 2627 return IsLazySweepingComplete(); | |
| 2628 } | |
| 2629 | |
| 2630 | |
| 2631 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { | 2574 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2632 if (allocation_info_.top() >= allocation_info_.limit()) return; | 2575 if (allocation_info_.top() >= allocation_info_.limit()) return; |
| 2633 | 2576 |
| 2634 if (Page::FromAllocationTop(allocation_info_.top())-> | 2577 if (Page::FromAllocationTop(allocation_info_.top())-> |
| 2635 IsEvacuationCandidate()) { | 2578 IsEvacuationCandidate()) { |
| 2636 // Create filler object to keep page iterable if it was iterable. | 2579 // Create filler object to keep page iterable if it was iterable. |
| 2637 int remaining = | 2580 int remaining = |
| 2638 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); | 2581 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); |
| 2639 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); | 2582 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); |
| 2640 | 2583 |
| 2641 allocation_info_.set_top(NULL); | 2584 allocation_info_.set_top(NULL); |
| 2642 allocation_info_.set_limit(NULL); | 2585 allocation_info_.set_limit(NULL); |
| 2643 } | 2586 } |
| 2644 } | 2587 } |
| 2645 | 2588 |
| 2646 | 2589 |
| 2647 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { | 2590 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { |
| 2648 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2591 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2649 if (collector->AreSweeperThreadsActivated()) { | 2592 if (collector->AreSweeperThreadsActivated()) { |
| 2650 if (collector->IsConcurrentSweepingInProgress()) { | 2593 if (collector->IsConcurrentSweepingInProgress()) { |
| 2651 if (collector->RefillFreeLists(this) < size_in_bytes) { | 2594 if (collector->RefillFreeLists(this) < size_in_bytes) { |
| 2652 if (!collector->sequential_sweeping()) { | 2595 if (!collector->sequential_sweeping()) { |
| 2653 collector->WaitUntilSweepingCompleted(); | 2596 collector->WaitUntilSweepingCompleted(); |
| 2654 return true; | 2597 return true; |
| 2655 } | 2598 } |
| 2656 } | 2599 } |
| 2657 return false; | 2600 return false; |
| 2658 } | 2601 } |
| 2659 return true; | |
| 2660 } else { | |
| 2661 return AdvanceSweeper(size_in_bytes); | |
| 2662 } | 2602 } |
| | 2603 return true; |
| 2663 } | 2604 } |
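The rewritten EnsureSweeperProgress() drops the AdvanceSweeper fallback and reduces to a refill-or-wait policy: pull already-swept pages onto the free lists, and if that yields too little, block on the sweeper threads (unless sweeping was forced sequential). A minimal sketch of that policy, with a hypothetical Sweeper stand-in rather than the V8 classes:

    #include <cstdint>

    struct Sweeper {
      bool threads_activated;       // sweeper threads exist
      bool concurrent_in_progress;  // a concurrent sweep is running
      bool sequential;              // sweeping forced onto the main thread
      intptr_t RefillFreeLists() { return 0; }  // bytes moved onto free lists
      void WaitUntilSweepingCompleted() {}      // join the sweeper threads
    };

    // Returns true when no further sweeping progress is pending; false means
    // "retry the free-list allocation and call again if it still fails".
    bool EnsureSweeperProgress(Sweeper* s, intptr_t size_in_bytes) {
      if (s->threads_activated && s->concurrent_in_progress) {
        // Not enough was refilled and blocking is allowed: wait for the
        // sweeper threads, which makes all freed memory visible at once.
        if (s->RefillFreeLists() < size_in_bytes && !s->sequential) {
          s->WaitUntilSweepingCompleted();
          return true;
        }
        return false;
      }
      return true;  // no concurrent sweeping: nothing to wait for
    }

    int main() {
      Sweeper s{true, true, false};
      return EnsureSweeperProgress(&s, 64) ? 0 : 1;
    }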
| 2664 | 2605 |
| 2665 | 2606 |
| 2666 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 2607 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2667 // Allocation in this space has failed. | 2608 // Allocation in this space has failed. |
| 2668 | 2609 |
| 2669 // If there are unswept pages, advance the lazy sweeper a bounded number of times | 2610 // If there are unswept pages, advance sweeping a bounded number of times |
| 2670 // until we find a size_in_bytes contiguous piece of memory. | 2611 // until we find a size_in_bytes contiguous piece of memory. |
| 2671 const int kMaxSweepingTries = 5; | 2612 const int kMaxSweepingTries = 5; |
| 2672 bool sweeping_complete = false; | 2613 bool sweeping_complete = false; |
| 2673 | 2614 |
| 2674 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { | 2615 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { |
| 2675 sweeping_complete = EnsureSweeperProgress(size_in_bytes); | 2616 sweeping_complete = EnsureSweeperProgress(size_in_bytes); |
| 2676 | 2617 |
| 2677 // Retry the free list allocation. | 2618 // Retry the free list allocation. |
| 2678 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2619 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2679 if (object != NULL) return object; | 2620 if (object != NULL) return object; |
| 2680 } | 2621 } |
| 2681 | 2622 |
| 2682 // Free list allocation failed and there is no next page. Fail if we have | 2623 // Free list allocation failed and there is no next page. Fail if we have |
| 2683 // hit the old generation size limit that should cause a garbage | 2624 // hit the old generation size limit that should cause a garbage |
| 2684 // collection. | 2625 // collection. |
| 2685 if (!heap()->always_allocate() && | 2626 if (!heap()->always_allocate() && |
| 2686 heap()->OldGenerationAllocationLimitReached()) { | 2627 heap()->OldGenerationAllocationLimitReached()) { |
| 2687 return NULL; | 2628 return NULL; |
| 2688 } | 2629 } |
| 2689 | 2630 |
| 2690 // Try to expand the space and allocate in the new next page. | 2631 // Try to expand the space and allocate in the new next page. |
| 2691 if (Expand()) { | 2632 if (Expand()) { |
| 2692 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); | 2633 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); |
| 2693 return free_list_.Allocate(size_in_bytes); | 2634 return free_list_.Allocate(size_in_bytes); |
| 2694 } | 2635 } |
| 2695 | 2636 |
| 2696 // Last ditch, sweep all the remaining pages to try to find space. This may | 2637 // Last ditch, wait for sweeping to finish to try to find space. |
| 2697 // cause a pause. | 2638 if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) { |
| 2698 if (!IsLazySweepingComplete()) { | 2639 heap()->mark_compact_collector()->WaitUntilSweepingCompleted(); |
| 2699 EnsureSweeperProgress(kMaxInt); | |
| 2700 | 2640 |
| 2701 // Retry the free list allocation. | 2641 // Retry the free list allocation. |
| 2702 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2642 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2703 if (object != NULL) return object; | 2643 if (object != NULL) return object; |
| 2704 } | 2644 } |
| 2705 | 2645 |
| 2706 // Finally, fail. | 2646 // Finally, fail. |
| 2707 return NULL; | 2647 return NULL; |
| 2708 } | 2648 } |
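The behavioral change in the hunk above, in isolation: the old "last ditch" advanced the lazy sweeper inline on the allocating thread (EnsureSweeperProgress(kMaxInt)); the new one blocks until the concurrent sweeper threads finish, so all freed memory reaches the free lists, then retries once. A hedged sketch with hypothetical stand-in types, not the V8 API:

    #include <cstddef>

    struct Collector {
      bool sweeping_in_progress;
      void WaitUntilSweepingCompleted() { sweeping_in_progress = false; }
    };
    struct FreeList {
      void* Allocate(int) { return nullptr; }  // stub: real one walks free lists
    };

    // Block on the sweeper threads, then retry the free list exactly once.
    void* LastDitchAllocate(Collector* c, FreeList* fl, int size_in_bytes) {
      if (c->sweeping_in_progress) {
        c->WaitUntilSweepingCompleted();
        return fl->Allocate(size_in_bytes);  // may still be NULL; caller fails
      }
      return nullptr;
    }

    int main() {
      Collector c{true};
      FreeList fl;
      return LastDitchAllocate(&c, &fl, 64) == nullptr ? 0 : 1;
    }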
| 2709 | 2649 |
| (...skipping 504 matching lines...) |
| 3214 object->ShortPrint(); | 3154 object->ShortPrint(); |
| 3215 PrintF("\n"); | 3155 PrintF("\n"); |
| 3216 } | 3156 } |
| 3217 printf(" --------------------------------------\n"); | 3157 printf(" --------------------------------------\n"); |
| 3218 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3158 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3219 } | 3159 } |
| 3220 | 3160 |
| 3221 #endif // DEBUG | 3161 #endif // DEBUG |
| 3222 | 3162 |
| 3223 } } // namespace v8::internal | 3163 } } // namespace v8::internal |