OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | |
10 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
11 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
12 #include "src/base/platform/mutex.h" | 11 #include "src/base/platform/mutex.h" |
13 #include "src/flags.h" | 12 #include "src/flags.h" |
14 #include "src/hashmap.h" | 13 #include "src/hashmap.h" |
15 #include "src/list.h" | 14 #include "src/list.h" |
16 #include "src/objects.h" | 15 #include "src/objects.h" |
17 #include "src/utils.h" | 16 #include "src/utils.h" |
18 | 17 |
19 namespace v8 { | 18 namespace v8 { |
(...skipping 534 matching lines...) |
554 | 553 |
555 static const size_t kSlotsBufferOffset = | 554 static const size_t kSlotsBufferOffset = |
556 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 555 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
557 | 556 |
558 static const size_t kWriteBarrierCounterOffset = | 557 static const size_t kWriteBarrierCounterOffset = |
559 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 558 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
560 + kPointerSize; // SkipList* skip_list_; | 559 + kPointerSize; // SkipList* skip_list_; |
561 | 560 |
562 static const size_t kMinHeaderSize = | 561 static const size_t kMinHeaderSize = |
563 kWriteBarrierCounterOffset + | 562 kWriteBarrierCounterOffset + |
564 kIntptrSize // intptr_t write_barrier_counter_ | 563 kIntptrSize // intptr_t write_barrier_counter_ |
565 + kIntSize // int progress_bar_ | 564 + kIntSize // int progress_bar_ |
566 + kPointerSize // AtomicValue high_water_mark_ | 565 + kIntSize // int high_water_mark_ |
567 + kPointerSize // base::Mutex* mutex_ | 566 + kPointerSize // base::Mutex* mutex_ |
568 + kPointerSize // base::AtomicWord parallel_sweeping_ | 567 + kPointerSize // base::AtomicWord parallel_sweeping_ |
569 + 5 * kPointerSize // AtomicNumber free-list statistics | 568 + 5 * kIntSize // int free-list statistics |
570 + kPointerSize // base::AtomicWord next_chunk_ | 569 + kPointerSize // base::AtomicWord next_chunk_ |
571 + kPointerSize; // base::AtomicWord prev_chunk_ | 570 + kPointerSize; // base::AtomicWord prev_chunk_ |
572 | 571 |
573 // We add some more space to the computed header size to account for | 572 // We add some more space to the computed header size to account for |
574 // alignment requirements missing from our computation. | 573 // alignment requirements missing from our computation. |
575 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 574 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
576 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; | 575 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; |
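For concreteness, here is a hedged check of that alignment claim, assuming a 64-bit target where kPointerSize == kIntptrSize == 8 and kIntSize == 4 (the real constants live in V8's src/globals.h, and this also assumes kWriteBarrierCounterOffset is itself pointer aligned):

    #include <cstdio>

    int main() {
      // Hypothetical 64-bit sizes; the real constants are in src/globals.h.
      const int kPointerSize = 8, kIntptrSize = 8, kIntSize = 4;
      // Bytes the NEW layout places after kWriteBarrierCounterOffset:
      int tail = kIntptrSize    // intptr_t write_barrier_counter_
               + kIntSize       // int progress_bar_
               + kIntSize       // int high_water_mark_
               + kPointerSize   // base::Mutex* mutex_
               + kPointerSize   // base::AtomicWord parallel_sweeping_
               + 5 * kIntSize   // int free-list statistics
               + kPointerSize   // base::AtomicWord next_chunk_
               + kPointerSize;  // base::AtomicWord prev_chunk_
      // tail == 68 is 4 bytes short of an 8-byte boundary, so adding the
      // extra kIntSize yields 72 and restores pointer alignment.
      printf("tail=%d, padded=%d\n", tail, tail + kIntSize);
    }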
577 | 576 |
578 static const int kBodyOffset = | 577 static const int kBodyOffset = |
579 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 578 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
580 | 579 |
581 // The start offset of the object area in a page. Aligned to both maps and | 580 // The start offset of the object area in a page. Aligned to both maps and |
(...skipping 86 matching lines...) |
668 DCHECK(slots_buffer_ == NULL); | 667 DCHECK(slots_buffer_ == NULL); |
669 ClearFlag(EVACUATION_CANDIDATE); | 668 ClearFlag(EVACUATION_CANDIDATE); |
670 } | 669 } |
671 | 670 |
672 Address area_start() { return area_start_; } | 671 Address area_start() { return area_start_; } |
673 Address area_end() { return area_end_; } | 672 Address area_end() { return area_end_; } |
674 int area_size() { return static_cast<int>(area_end() - area_start()); } | 673 int area_size() { return static_cast<int>(area_end() - area_start()); } |
675 bool CommitArea(size_t requested); | 674 bool CommitArea(size_t requested); |
676 | 675 |
677 // Approximate amount of physical memory committed for this chunk. | 676 // Approximate amount of physical memory committed for this chunk. |
678 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 677 size_t CommittedPhysicalMemory() { return high_water_mark_; } |
679 | 678 |
680 // Should be called when memory chunk is about to be freed. | 679 // Should be called when memory chunk is about to be freed. |
681 void ReleaseAllocatedMemory(); | 680 void ReleaseAllocatedMemory(); |
682 | 681 |
683 static inline void UpdateHighWaterMark(Address mark) { | 682 static inline void UpdateHighWaterMark(Address mark) { |
684 if (mark == nullptr) return; | 683 if (mark == NULL) return; |
685 // Need to subtract one from the mark because when a chunk is full the | 684 // Need to subtract one from the mark because when a chunk is full the |
686 // top points to the next address after the chunk, which effectively belongs | 685 // top points to the next address after the chunk, which effectively belongs |
687 // to another chunk. See the comment to Page::FromAllocationTop. | 686 // to another chunk. See the comment to Page::FromAllocationTop. |
688 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | 687 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
689 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); | 688 int new_mark = static_cast<int>(mark - chunk->address()); |
690 intptr_t old_mark = 0; | 689 if (new_mark > chunk->high_water_mark_) { |
691 do { | 690 chunk->high_water_mark_ = new_mark; |
692 old_mark = chunk->high_water_mark_.Value(); | 691 } |
693 } while ((new_mark > old_mark) && | |
694 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); | |
695 } | 692 } |
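The OLD loop above is the usual lock-free monotonic-max idiom. A minimal sketch of the same pattern, with std::atomic standing in for V8's AtomicValue (an assumption for illustration; the NEW side drops atomicity entirely):

    #include <atomic>
    #include <cstdint>

    // Publish new_mark only if it exceeds the current value, retrying when
    // another thread won the race. compare_exchange_weak reloads old_mark
    // on failure, so each iteration re-tests against the fresh value.
    inline void UpdateMax(std::atomic<intptr_t>* mark, intptr_t new_mark) {
      intptr_t old_mark = mark->load(std::memory_order_relaxed);
      while (new_mark > old_mark &&
             !mark->compare_exchange_weak(old_mark, new_mark)) {
        // old_mark has been refreshed; the loop re-checks the condition.
      }
    }

The NEW plain-int version performs the same max update without synchronization, which is only safe if callers are serialized externally.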
696 | 693 |
697 protected: | 694 protected: |
698 size_t size_; | 695 size_t size_; |
699 intptr_t flags_; | 696 intptr_t flags_; |
700 | 697 |
701 // Start and end of allocatable memory on this chunk. | 698 // Start and end of allocatable memory on this chunk. |
702 Address area_start_; | 699 Address area_start_; |
703 Address area_end_; | 700 Address area_end_; |
704 | 701 |
(...skipping 10 matching lines...) |
715 // Count of bytes marked black on page. | 712 // Count of bytes marked black on page. |
716 int live_byte_count_; | 713 int live_byte_count_; |
717 SlotsBuffer* slots_buffer_; | 714 SlotsBuffer* slots_buffer_; |
718 SkipList* skip_list_; | 715 SkipList* skip_list_; |
719 intptr_t write_barrier_counter_; | 716 intptr_t write_barrier_counter_; |
720 // Used by the incremental marker to keep track of the scanning progress in | 717 // Used by the incremental marker to keep track of the scanning progress in |
721 // large objects that have a progress bar and are scanned in increments. | 718 // large objects that have a progress bar and are scanned in increments. |
722 int progress_bar_; | 719 int progress_bar_; |
723 // Assuming the initial allocation on a page is sequential, | 720 // Assuming the initial allocation on a page is sequential, |
724 // count the highest number of bytes ever allocated on the page. | 721 // count the highest number of bytes ever allocated on the page. |
725 AtomicValue<intptr_t> high_water_mark_; | 722 int high_water_mark_; |
726 | 723 |
727 base::Mutex* mutex_; | 724 base::Mutex* mutex_; |
728 base::AtomicWord parallel_sweeping_; | 725 base::AtomicWord parallel_sweeping_; |
729 | 726 |
730 // PagedSpace free-list statistics. | 727 // PagedSpace free-list statistics. |
731 AtomicNumber<intptr_t> available_in_small_free_list_; | 728 int available_in_small_free_list_; |
732 AtomicNumber<intptr_t> available_in_medium_free_list_; | 729 int available_in_medium_free_list_; |
733 AtomicNumber<intptr_t> available_in_large_free_list_; | 730 int available_in_large_free_list_; |
734 AtomicNumber<intptr_t> available_in_huge_free_list_; | 731 int available_in_huge_free_list_; |
735 AtomicNumber<intptr_t> non_available_small_blocks_; | 732 int non_available_small_blocks_; |
736 | 733 |
737 // next_chunk_ holds a pointer to a MemoryChunk. | 734 // next_chunk_ holds a pointer to a MemoryChunk. |
738 base::AtomicWord next_chunk_; | 735 base::AtomicWord next_chunk_; |
739 // prev_chunk_ holds a pointer to a MemoryChunk. | 736 // prev_chunk_ holds a pointer to a MemoryChunk. |
740 base::AtomicWord prev_chunk_; | 737 base::AtomicWord prev_chunk_; |
741 | 738 |
742 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | 739 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
743 Address area_start, Address area_end, | 740 Address area_start, Address area_end, |
744 Executability executable, Space* owner); | 741 Executability executable, Space* owner); |
745 | 742 |
(...skipping 78 matching lines...) |
824 | 821 |
825 void InitializeAsAnchor(PagedSpace* owner); | 822 void InitializeAsAnchor(PagedSpace* owner); |
826 | 823 |
827 bool WasSwept() { return IsFlagSet(WAS_SWEPT); } | 824 bool WasSwept() { return IsFlagSet(WAS_SWEPT); } |
828 void SetWasSwept() { SetFlag(WAS_SWEPT); } | 825 void SetWasSwept() { SetFlag(WAS_SWEPT); } |
829 void ClearWasSwept() { ClearFlag(WAS_SWEPT); } | 826 void ClearWasSwept() { ClearFlag(WAS_SWEPT); } |
830 | 827 |
831 void ResetFreeListStatistics(); | 828 void ResetFreeListStatistics(); |
832 | 829 |
833 int LiveBytesFromFreeList() { | 830 int LiveBytesFromFreeList() { |
834 return static_cast<int>( | 831 return area_size() - non_available_small_blocks_ - |
835 area_size() - non_available_small_blocks() - | 832 available_in_small_free_list_ - available_in_medium_free_list_ - |
836 available_in_small_free_list() - available_in_medium_free_list() - | 833 available_in_large_free_list_ - available_in_huge_free_list_; |
837 available_in_large_free_list() - available_in_huge_free_list()); | |
838 } | 834 } |
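As an illustrative calculation (the numbers are made up): on a page with an area_size() of 1 MB where the free lists hold 256 KB and 16 KB sits in blocks too small for any free list, LiveBytesFromFreeList() estimates 1048576 - 16384 - 262144 = 770048 live bytes.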
839 | 835 |
840 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ | 836 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ |
841 type name() { return name##_.Value(); } \ | 837 type name() { return name##_; } \ |
842 void set_##name(type name) { name##_.SetValue(name); } \ | 838 void set_##name(type name) { name##_ = name; } \ |
843 void add_##name(type name) { name##_.Increment(name); } | 839 void add_##name(type name) { name##_ += name; } |
844 | 840 |
845 FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks) | 841 FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks) |
846 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) | 842 FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list) |
847 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list) | 843 FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list) |
848 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list) | 844 FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list) |
849 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list) | 845 FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list) |
850 | 846 |
851 #undef FRAGMENTATION_STATS_ACCESSORS | 847 #undef FRAGMENTATION_STATS_ACCESSORS |
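To make the macro concrete, its NEW-side expansion for one statistic is just plain, unsynchronized accessors (the OLD side routed the same three through AtomicNumber's Value/SetValue/Increment):

    // FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
    // expands to:
    int available_in_small_free_list() { return available_in_small_free_list_; }
    void set_available_in_small_free_list(int available_in_small_free_list) {
      available_in_small_free_list_ = available_in_small_free_list;
    }
    void add_available_in_small_free_list(int available_in_small_free_list) {
      available_in_small_free_list_ += available_in_small_free_list;
    }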
852 | 848 |
853 #ifdef DEBUG | 849 #ifdef DEBUG |
854 void Print(); | 850 void Print(); |
855 #endif // DEBUG | 851 #endif // DEBUG |
856 | 852 |
857 friend class MemoryAllocator; | 853 friend class MemoryAllocator; |
858 }; | 854 }; |
859 | 855 |
(...skipping 265 matching lines...) |
1125 // bookkeeping and calls the allocation callback. | 1121 // bookkeeping and calls the allocation callback. |
1126 void PreFreeMemory(MemoryChunk* chunk); | 1122 void PreFreeMemory(MemoryChunk* chunk); |
1127 | 1123 |
1128 // PerformFreeMemory can be called concurrently if PreFreeMemory ran first. | 1124 // PerformFreeMemory can be called concurrently if PreFreeMemory ran first. |
1129 void PerformFreeMemory(MemoryChunk* chunk); | 1125 void PerformFreeMemory(MemoryChunk* chunk); |
1130 | 1126 |
1131 // Free is a wrapper method that calls PreFreeMemory and PerformFreeMemory | 1127 // Free is a wrapper method that calls PreFreeMemory and PerformFreeMemory |
1132 // together. | 1128 // together. |
1133 void Free(MemoryChunk* chunk); | 1129 void Free(MemoryChunk* chunk); |
1134 | 1130 |
| 1131 // Returns the maximum available bytes of the heap. |
| 1132 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
| 1133 |
1135 // Returns allocated spaces in bytes. | 1134 // Returns allocated spaces in bytes. |
1136 intptr_t Size() { return size_.Value(); } | 1135 intptr_t Size() { return size_; } |
1137 | |
1138 // Returns allocated executable spaces in bytes. | |
1139 intptr_t SizeExecutable() { return size_executable_.Value(); } | |
1140 | |
1141 // Returns the maximum available bytes of the heap. | |
1142 intptr_t Available() { | |
1143 intptr_t size = Size(); | |
1144 return capacity_ < size ? 0 : capacity_ - size; | |
1145 } | |
1146 | 1136 |
1147 // Returns the maximum available executable bytes of the heap. | 1137 // Returns the maximum available executable bytes of the heap. |
1148 intptr_t AvailableExecutable() { | 1138 intptr_t AvailableExecutable() { |
1149 intptr_t executable_size = SizeExecutable(); | 1139 if (capacity_executable_ < size_executable_) return 0; |
1150 if (capacity_executable_ < executable_size) return 0; | 1140 return capacity_executable_ - size_executable_; |
1151 return capacity_executable_ - executable_size; | |
1152 } | 1141 } |
1153 | 1142 |
| 1143 // Returns allocated executable spaces in bytes. |
| 1144 intptr_t SizeExecutable() { return size_executable_; } |
| 1145 |
1154 // Returns maximum available bytes that the old space can have. | 1146 // Returns maximum available bytes that the old space can have. |
1155 intptr_t MaxAvailable() { | 1147 intptr_t MaxAvailable() { |
1156 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize; | 1148 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize; |
1157 } | 1149 } |
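As a worked example with hypothetical numbers (capacity_ and size_ are runtime values): if capacity_ is 512 MB and size_ is 128 MB, Available() returns 384 MB; with a 1 MB Page::kPageSize that is room for 384 pages, so MaxAvailable() returns 384 * Page::kMaxRegularHeapObjectSize.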
1158 | 1150 |
1159 // Returns an indication of whether a pointer is in a space that has | 1151 // Returns an indication of whether a pointer is in a space that has |
1160 // been allocated by this MemoryAllocator. | 1152 // been allocated by this MemoryAllocator. |
1161 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) { | 1153 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { |
1162 return address < lowest_ever_allocated_.Value() || | 1154 return address < lowest_ever_allocated_ || |
1163 address >= highest_ever_allocated_.Value(); | 1155 address >= highest_ever_allocated_; |
1164 } | 1156 } |
1165 | 1157 |
1166 #ifdef DEBUG | 1158 #ifdef DEBUG |
1167 // Reports statistic info of the space. | 1159 // Reports statistic info of the space. |
1168 void ReportStatistics(); | 1160 void ReportStatistics(); |
1169 #endif | 1161 #endif |
1170 | 1162 |
1171 // Returns a MemoryChunk in which the memory region from commit_area_size to | 1163 // Returns a MemoryChunk in which the memory region from commit_area_size to |
1172 // reserve_area_size of the chunk area is reserved but not committed; it | 1164 // reserve_area_size of the chunk area is reserved but not committed; it |
1173 // can be committed later by calling MemoryChunk::CommitArea. | 1165 // can be committed later by calling MemoryChunk::CommitArea. |
(...skipping 59 matching lines...) |
1233 } | 1225 } |
1234 | 1226 |
1235 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, | 1227 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
1236 Address start, size_t commit_size, | 1228 Address start, size_t commit_size, |
1237 size_t reserved_size); | 1229 size_t reserved_size); |
1238 | 1230 |
1239 private: | 1231 private: |
1240 Isolate* isolate_; | 1232 Isolate* isolate_; |
1241 | 1233 |
1242 // Maximum space size in bytes. | 1234 // Maximum space size in bytes. |
1243 intptr_t capacity_; | 1235 size_t capacity_; |
1244 // Maximum subset of capacity_ that can be executable | 1236 // Maximum subset of capacity_ that can be executable |
1245 intptr_t capacity_executable_; | 1237 size_t capacity_executable_; |
1246 | 1238 |
1247 // Allocated space size in bytes. | 1239 // Allocated space size in bytes. |
1248 AtomicNumber<intptr_t> size_; | 1240 size_t size_; |
1249 // Allocated executable space size in bytes. | 1241 // Allocated executable space size in bytes. |
1250 AtomicNumber<intptr_t> size_executable_; | 1242 size_t size_executable_; |
1251 | 1243 |
1252 // We keep the lowest and highest addresses allocated as a quick way | 1244 // We keep the lowest and highest addresses allocated as a quick way |
1253 // of determining that pointers are outside the heap. The estimate is | 1245 // of determining that pointers are outside the heap. The estimate is |
1254 // conservative, i.e. not all addresses in 'allocated' space are allocated | 1246 // conservative, i.e. not all addresses in 'allocated' space are allocated |
1255 // to our heap. The range is [lowest, highest[, inclusive on the low end | 1247 // to our heap. The range is [lowest, highest[, inclusive on the low end |
1256 // and exclusive on the high end. | 1248 // and exclusive on the high end. |
1257 AtomicValue<void*> lowest_ever_allocated_; | 1249 void* lowest_ever_allocated_; |
1258 AtomicValue<void*> highest_ever_allocated_; | 1250 void* highest_ever_allocated_; |
1259 | 1251 |
1260 struct MemoryAllocationCallbackRegistration { | 1252 struct MemoryAllocationCallbackRegistration { |
1261 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1253 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
1262 ObjectSpace space, | 1254 ObjectSpace space, |
1263 AllocationAction action) | 1255 AllocationAction action) |
1264 : callback(callback), space(space), action(action) {} | 1256 : callback(callback), space(space), action(action) {} |
1265 MemoryAllocationCallback callback; | 1257 MemoryAllocationCallback callback; |
1266 ObjectSpace space; | 1258 ObjectSpace space; |
1267 AllocationAction action; | 1259 AllocationAction action; |
1268 }; | 1260 }; |
1269 | 1261 |
1270 // A list of callbacks that are triggered when memory is allocated or freed. | 1262 // A list of callbacks that are triggered when memory is allocated or freed. |
1271 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; | 1263 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; |
1272 | 1264 |
1273 // Initializes pages in a chunk. Returns the first page address. | 1265 // Initializes pages in a chunk. Returns the first page address. |
1274 // This function and GetChunkId() are provided for the mark-compact | 1266 // This function and GetChunkId() are provided for the mark-compact |
1275 // collector to rebuild page headers in the from space, which is | 1267 // collector to rebuild page headers in the from space, which is |
1276 // used as a marking stack and its page headers are destroyed. | 1268 // used as a marking stack and its page headers are destroyed. |
1277 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 1269 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
1278 PagedSpace* owner); | 1270 PagedSpace* owner); |
1279 | 1271 |
1280 void UpdateAllocatedSpaceLimits(void* low, void* high) { | 1272 void UpdateAllocatedSpaceLimits(void* low, void* high) { |
1281 // The use of atomic primitives does not guarantee correctness (wrt. | 1273 lowest_ever_allocated_ = Min(lowest_ever_allocated_, low); |
1282 // desired semantics) by default. The loop here ensures that we update the | 1274 highest_ever_allocated_ = Max(highest_ever_allocated_, high); |
1283 // values only if they did not change in between. | |
1284 void* ptr = nullptr; | |
1285 do { | |
1286 ptr = lowest_ever_allocated_.Value(); | |
1287 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); | |
1288 do { | |
1289 ptr = highest_ever_allocated_.Value(); | |
1290 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); | |
1291 } | 1275 } |
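Design note: the OLD compare-and-swap loops let concurrent allocations update these limits safely, at the cost of a retry loop; the NEW plain Min/Max form (Min and Max are the helpers from src/utils.h) loses that guarantee, so racing writers could drop an update unless callers are serialized, e.g. under the allocator's lock.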
1292 | 1276 |
1293 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); | 1277 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
1294 }; | 1278 }; |
1295 | 1279 |
1296 | 1280 |
1297 // ----------------------------------------------------------------------------- | 1281 // ----------------------------------------------------------------------------- |
1298 // Interface for heap object iterator to be implemented by all object space | 1282 // Interface for heap object iterator to be implemented by all object space |
1299 // object iterators. | 1283 // object iterators. |
1300 // | 1284 // |
(...skipping 1636 matching lines...) |
2937 count = 0; | 2921 count = 0; |
2938 } | 2922 } |
2939 // Must be small, since an iteration is used for lookup. | 2923 // Must be small, since an iteration is used for lookup. |
2940 static const int kMaxComments = 64; | 2924 static const int kMaxComments = 64; |
2941 }; | 2925 }; |
2942 #endif | 2926 #endif |
2943 } | 2927 } |
2944 } // namespace v8::internal | 2928 } // namespace v8::internal |
2945 | 2929 |
2946 #endif // V8_HEAP_SPACES_H_ | 2930 #endif // V8_HEAP_SPACES_H_ |