OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
(...skipping 2442 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2453 | 2453 |
2454 private: | 2454 private: |
2455 NewSpacePage* prev_page_; // Previous page returned. | 2455 NewSpacePage* prev_page_; // Previous page returned. |
2456 // Next page that will be returned. Cached here so that we can use this | 2456 // Next page that will be returned. Cached here so that we can use this |
2457 // iterator for operations that deallocate pages. | 2457 // iterator for operations that deallocate pages. |
2458 NewSpacePage* next_page_; | 2458 NewSpacePage* next_page_; |
2459 // Last page returned. | 2459 // Last page returned. |
2460 NewSpacePage* last_page_; | 2460 NewSpacePage* last_page_; |
2461 }; | 2461 }; |
2462 | 2462 |
2463 // ----------------------------------------------------------------------------- | |
2464 // Allows observation of inline allocation in the new space. | |
2465 class InlineAllocationObserver { | |
2466 public: | |
2467 InlineAllocationObserver(const InlineAllocationObserver&) = delete; | |
ulan
2015/10/22 09:02:22
Please use DISALLOW_COPY_AND_ASSIGN() macro.
ofrobots
2015/10/26 21:55:01
Done.
| |
2468 InlineAllocationObserver& operator=(const InlineAllocationObserver&) = delete; | |
2469 | |
2470 explicit InlineAllocationObserver(intptr_t step_size) | |
2471 : step_size_(step_size), bytes_to_next_step_(step_size) { | |
2472 DCHECK(step_size >= kPointerSize); | |
2473 } | |
2474 virtual ~InlineAllocationObserver() {} | |
2475 | |
2476 private: | |
2477 intptr_t step_size() const { return step_size_; } | |
2478 | |
2479 // Pure virtual method provided by the subclasses that gets called when at | |
2481 // least step_size bytes have been allocated. | |
2481 virtual void Step(int bytes_allocated) = 0; | |
2482 | |
2483 // Called each time the new space does an inline allocation step. This may be | |
2484 // more frequently than the step_size we are monitoring (e.g. when there are | |
2485 // multiple observers, or when a page or space boundary is encountered.) The | |
2486 // Step method is only called once at least step_size bytes have been | |
2487 // allocated. | |
2488 void InlineAllocationStep(int bytes_allocated) { | |
2489 bytes_to_next_step_ -= bytes_allocated; | |
2490 if (bytes_to_next_step_ <= 0) { | |
2491 Step(static_cast<int>(step_size_ - bytes_to_next_step_)); | |
2492 bytes_to_next_step_ = step_size_; | |
2493 } | |
2494 } | |
2495 | |
2496 intptr_t step_size_; | |
2497 intptr_t bytes_to_next_step_; | |
2498 | |
2499 friend class NewSpace; | |
2500 }; | |
2463 | 2501 |
2464 // ----------------------------------------------------------------------------- | 2502 // ----------------------------------------------------------------------------- |
2465 // The young generation space. | 2503 // The young generation space. |
2466 // | 2504 // |
2467 // The new space consists of a contiguous pair of semispaces. It simply | 2505 // The new space consists of a contiguous pair of semispaces. It simply |
2468 // forwards most functions to the appropriate semispace. | 2506 // forwards most functions to the appropriate semispace. |
2469 | 2507 |
2470 class NewSpace : public Space { | 2508 class NewSpace : public Space { |
2471 public: | 2509 public: |
2472 // Constructor. | 2510 // Constructor. |
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2642 MUST_USE_RESULT INLINE( | 2680 MUST_USE_RESULT INLINE( |
2643 AllocationResult AllocateRawUnaligned(int size_in_bytes)); | 2681 AllocationResult AllocateRawUnaligned(int size_in_bytes)); |
2644 | 2682 |
2645 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw( | 2683 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw( |
2646 int size_in_bytes, AllocationAlignment alignment)); | 2684 int size_in_bytes, AllocationAlignment alignment)); |
2647 | 2685 |
2648 // Reset the allocation pointer to the beginning of the active semispace. | 2686 // Reset the allocation pointer to the beginning of the active semispace. |
2649 void ResetAllocationInfo(); | 2687 void ResetAllocationInfo(); |
2650 | 2688 |
2651 void UpdateInlineAllocationLimit(int size_in_bytes); | 2689 void UpdateInlineAllocationLimit(int size_in_bytes); |
2652 void LowerInlineAllocationLimit(intptr_t step) { | 2690 |
2653 inline_allocation_limit_step_ = step; | 2691 // Allows observation of inline allocation. The observer->Step() method gets |
2692 // called after every step_size bytes have been allocated (approximately). | |
2693 // This works by adjusting the allocation limit to a lower value and adjusting | |
2694 // it after each step. | |
2695 void AddInlineAllocationObserver(InlineAllocationObserver* observer); | |
2696 | |
2697 // Removes a previously installed observer. | |
2698 void RemoveInlineAllocationObserver(InlineAllocationObserver* observer); | |
2699 | |
2700 void DisableInlineAllocationSteps() { | |
2701 inline_allocation_limit_step_ = 0; | |
2702 top_on_previous_step_ = 0; | |
2654 UpdateInlineAllocationLimit(0); | 2703 UpdateInlineAllocationLimit(0); |
2655 top_on_previous_step_ = step ? allocation_info_.top() : 0; | |
2656 } | 2704 } |
2657 | 2705 |
2658 // Get the extent of the inactive semispace (for use as a marking stack, | 2706 // Get the extent of the inactive semispace (for use as a marking stack, |
2659 // or to zap it). Notice: space-addresses are not necessarily on the | 2707 // or to zap it). Notice: space-addresses are not necessarily on the |
2660 // same page, so FromSpaceStart() might be above FromSpaceEnd(). | 2708 // same page, so FromSpaceStart() might be above FromSpaceEnd(). |
2661 Address FromSpacePageLow() { return from_space_.page_low(); } | 2709 Address FromSpacePageLow() { return from_space_.page_low(); } |
2662 Address FromSpacePageHigh() { return from_space_.page_high(); } | 2710 Address FromSpacePageHigh() { return from_space_.page_high(); } |
2663 Address FromSpaceStart() { return from_space_.space_start(); } | 2711 Address FromSpaceStart() { return from_space_.space_start(); } |
2664 Address FromSpaceEnd() { return from_space_.space_end(); } | 2712 Address FromSpaceEnd() { return from_space_.space_end(); } |
2665 | 2713 |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2721 } | 2769 } |
2722 | 2770 |
2723 bool IsFromSpaceCommitted() { return from_space_.is_committed(); } | 2771 bool IsFromSpaceCommitted() { return from_space_.is_committed(); } |
2724 | 2772 |
2725 SemiSpace* active_space() { return &to_space_; } | 2773 SemiSpace* active_space() { return &to_space_; } |
2726 | 2774 |
2727 private: | 2775 private: |
2728 // Update allocation info to match the current to-space page. | 2776 // Update allocation info to match the current to-space page. |
2729 void UpdateAllocationInfo(); | 2777 void UpdateAllocationInfo(); |
2730 | 2778 |
2779 void UpdateInlineAllocationLimitStep(); | |
2780 | |
2731 Address chunk_base_; | 2781 Address chunk_base_; |
2732 uintptr_t chunk_size_; | 2782 uintptr_t chunk_size_; |
2733 | 2783 |
2734 // The semispaces. | 2784 // The semispaces. |
2735 SemiSpace to_space_; | 2785 SemiSpace to_space_; |
2736 SemiSpace from_space_; | 2786 SemiSpace from_space_; |
2737 base::VirtualMemory reservation_; | 2787 base::VirtualMemory reservation_; |
2738 int pages_used_; | 2788 int pages_used_; |
2739 | 2789 |
2740 // Start address and bit mask for containment testing. | 2790 // Start address and bit mask for containment testing. |
2741 Address start_; | 2791 Address start_; |
2742 uintptr_t address_mask_; | 2792 uintptr_t address_mask_; |
2743 uintptr_t object_mask_; | 2793 uintptr_t object_mask_; |
2744 uintptr_t object_expected_; | 2794 uintptr_t object_expected_; |
2745 | 2795 |
2746 // Allocation pointer and limit for normal allocation and allocation during | 2796 // Allocation pointer and limit for normal allocation and allocation during |
2747 // mark-compact collection. | 2797 // mark-compact collection. |
2748 AllocationInfo allocation_info_; | 2798 AllocationInfo allocation_info_; |
2749 | 2799 |
2750 // When incremental marking is active we will set allocation_info_.limit | 2800 // When inline allocation stepping is active, either because of incremental |
2751 // to be lower than actual limit and then will gradually increase it | 2801 // marking or because of idle scavenge, we 'interrupt' inline allocation every |
2752 // in steps to guarantee that we do incremental marking steps even | 2802 // once in a while. This is done by setting allocation_info_.limit to be lower |
2753 // when all allocation is performed from inlined generated code. | 2803 // than the actual limit and increasing it in steps to guarantee that the |
2804 // observers are notified periodically. | |
2754 intptr_t inline_allocation_limit_step_; | 2805 intptr_t inline_allocation_limit_step_; |
2806 List<InlineAllocationObserver*> inline_allocation_observers_; | |
2755 | 2807 |
2756 Address top_on_previous_step_; | 2808 Address top_on_previous_step_; |
2757 | 2809 |
2758 HistogramInfo* allocated_histogram_; | 2810 HistogramInfo* allocated_histogram_; |
2759 HistogramInfo* promoted_histogram_; | 2811 HistogramInfo* promoted_histogram_; |
2760 | 2812 |
2761 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); | 2813 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); |
2762 | 2814 |
2763 // If we are doing inline allocation in steps, this method performs the 'step' | 2815 // If we are doing inline allocation in steps, this method performs the 'step' |
2764 // operation. Right now incremental marking is the only consumer of inline | 2816 // operation. top is the memory address of the bump pointer at the last |
2765 // allocation steps. top is the memory address of the bump pointer at the last | |
2766 // inline allocation (i.e. it determines the numbers of bytes actually | 2817 // inline allocation (i.e. it determines the numbers of bytes actually |
2767 // allocated since the last step.) new_top is the address of the bump pointer | 2818 // allocated since the last step.) new_top is the address of the bump pointer |
2768 // where the next byte is going to be allocated from. top and new_top may be | 2819 // where the next byte is going to be allocated from. top and new_top may be |
2769 // different when we cross a page boundary or reset the space. | 2820 // different when we cross a page boundary or reset the space. |
2770 void InlineAllocationStep(Address top, Address new_top); | 2821 void InlineAllocationStep(Address top, Address new_top); |
2771 | 2822 |
2772 friend class SemiSpaceIterator; | 2823 friend class SemiSpaceIterator; |
2773 }; | 2824 }; |
2774 | 2825 |
2775 // ----------------------------------------------------------------------------- | 2826 // ----------------------------------------------------------------------------- |
(...skipping 233 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3009 count = 0; | 3060 count = 0; |
3010 } | 3061 } |
3011 // Must be small, since an iteration is used for lookup. | 3062 // Must be small, since an iteration is used for lookup. |
3012 static const int kMaxComments = 64; | 3063 static const int kMaxComments = 64; |
3013 }; | 3064 }; |
3014 #endif | 3065 #endif |
3015 } // namespace internal | 3066 } // namespace internal |
3016 } // namespace v8 | 3067 } // namespace v8 |
3017 | 3068 |
3018 #endif // V8_HEAP_SPACES_H_ | 3069 #endif // V8_HEAP_SPACES_H_ |
OLD | NEW |