OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 368 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
379 // Even if the mutator writes to them they will be kept black and a white | 379 // Even if the mutator writes to them they will be kept black and a white |
380 // to grey transition is performed in the value. | 380 // to grey transition is performed in the value. |
381 HAS_PROGRESS_BAR, | 381 HAS_PROGRESS_BAR, |
382 | 382 |
383 // This flag is intended to be used for testing. Works only when both | 383 // This flag is intended to be used for testing. Works only when both |
384 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection | 384 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection |
385 // are set. It forces the page to become an evacuation candidate at next | 385 // are set. It forces the page to become an evacuation candidate at next |
386 // candidates selection cycle. | 386 // candidates selection cycle. |
387 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | 387 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
388 | 388 |
 | 389 // The memory chunk is already logically freed; however, the actual freeing |
 | 390 // still has to be performed. |
| 391 PRE_FREED, |
| 392 |
389 // Last flag, keep at bottom. | 393 // Last flag, keep at bottom. |
390 NUM_MEMORY_CHUNK_FLAGS | 394 NUM_MEMORY_CHUNK_FLAGS |
391 }; | 395 }; |
392 | 396 |
393 | 397 |
394 static const int kPointersToHereAreInterestingMask = | 398 static const int kPointersToHereAreInterestingMask = |
395 1 << POINTERS_TO_HERE_ARE_INTERESTING; | 399 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
396 | 400 |
397 static const int kPointersFromHereAreInterestingMask = | 401 static const int kPointersFromHereAreInterestingMask = |
398 1 << POINTERS_FROM_HERE_ARE_INTERESTING; | 402 1 << POINTERS_FROM_HERE_ARE_INTERESTING; |
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
650 } | 654 } |
651 | 655 |
652 Address area_start() { return area_start_; } | 656 Address area_start() { return area_start_; } |
653 Address area_end() { return area_end_; } | 657 Address area_end() { return area_end_; } |
654 int area_size() { return static_cast<int>(area_end() - area_start()); } | 658 int area_size() { return static_cast<int>(area_end() - area_start()); } |
655 bool CommitArea(size_t requested); | 659 bool CommitArea(size_t requested); |
656 | 660 |
657 // Approximate amount of physical memory committed for this chunk. | 661 // Approximate amount of physical memory committed for this chunk. |
658 size_t CommittedPhysicalMemory() { return high_water_mark_; } | 662 size_t CommittedPhysicalMemory() { return high_water_mark_; } |
659 | 663 |
| 664 // Should be called when memory chunk is about to be freed. |
| 665 void ReleaseAllocatedMemory(); |
| 666 |
660 static inline void UpdateHighWaterMark(Address mark) { | 667 static inline void UpdateHighWaterMark(Address mark) { |
661 if (mark == NULL) return; | 668 if (mark == NULL) return; |
662 // Need to subtract one from the mark because when a chunk is full the | 669 // Need to subtract one from the mark because when a chunk is full the |
663 // top points to the next address after the chunk, which effectively belongs | 670 // top points to the next address after the chunk, which effectively belongs |
664 // to another chunk. See the comment to Page::FromAllocationTop. | 671 // to another chunk. See the comment to Page::FromAllocationTop. |
665 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | 672 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
666 int new_mark = static_cast<int>(mark - chunk->address()); | 673 int new_mark = static_cast<int>(mark - chunk->address()); |
667 if (new_mark > chunk->high_water_mark_) { | 674 if (new_mark > chunk->high_water_mark_) { |
668 chunk->high_water_mark_ = new_mark; | 675 chunk->high_water_mark_ = new_mark; |
669 } | 676 } |
(...skipping 303 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
973 FreeBlock(void* start_arg, size_t size_arg) | 980 FreeBlock(void* start_arg, size_t size_arg) |
974 : start(static_cast<Address>(start_arg)), size(size_arg) { | 981 : start(static_cast<Address>(start_arg)), size(size_arg) { |
975 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); | 982 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
976 DCHECK(size >= static_cast<size_t>(Page::kPageSize)); | 983 DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
977 } | 984 } |
978 | 985 |
979 Address start; | 986 Address start; |
980 size_t size; | 987 size_t size; |
981 }; | 988 }; |
982 | 989 |
 | 990 // All access to free_list_ requires taking the free_list_mutex_. GC threads |
 | 991 // may access the free_list_ concurrently with the main thread. |
| 992 base::Mutex free_list_mutex_; |
| 993 |
983 // Freed blocks of memory are added to the free list. When the allocation | 994 // Freed blocks of memory are added to the free list. When the allocation |
984 // list is exhausted, the free list is sorted and merged to make the new | 995 // list is exhausted, the free list is sorted and merged to make the new |
985 // allocation list. | 996 // allocation list. |
986 List<FreeBlock> free_list_; | 997 List<FreeBlock> free_list_; |
| 998 |
987 // Memory is allocated from the free blocks on the allocation list. | 999 // Memory is allocated from the free blocks on the allocation list. |
988 // The block at current_allocation_block_index_ is the current block. | 1000 // The block at current_allocation_block_index_ is the current block. |
989 List<FreeBlock> allocation_list_; | 1001 List<FreeBlock> allocation_list_; |
990 int current_allocation_block_index_; | 1002 int current_allocation_block_index_; |
991 | 1003 |
992 // Emergency block guarantees that we can always allocate a page for | 1004 // Emergency block guarantees that we can always allocate a page for |
993 // evacuation candidates when code space is compacted. Emergency block is | 1005 // evacuation candidates when code space is compacted. Emergency block is |
994 // reserved immediately after GC and is released immediately before | 1006 // reserved immediately after GC and is released immediately before |
995 // allocating a page for evacuation. | 1007 // allocating a page for evacuation. |
996 FreeBlock emergency_block_; | 1008 FreeBlock emergency_block_; |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1072 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); | 1084 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); |
1073 | 1085 |
1074 void TearDown(); | 1086 void TearDown(); |
1075 | 1087 |
1076 Page* AllocatePage(intptr_t size, PagedSpace* owner, | 1088 Page* AllocatePage(intptr_t size, PagedSpace* owner, |
1077 Executability executable); | 1089 Executability executable); |
1078 | 1090 |
1079 LargePage* AllocateLargePage(intptr_t object_size, Space* owner, | 1091 LargePage* AllocateLargePage(intptr_t object_size, Space* owner, |
1080 Executability executable); | 1092 Executability executable); |
1081 | 1093 |
| 1094 // PreFree logically frees the object, i.e., it takes care of the size |
| 1095 // bookkeeping and calls the allocation callback. |
| 1096 void PreFreeMemory(MemoryChunk* chunk); |
| 1097 |
 | 1098 // PerformFreeMemory can be called concurrently when PreFreeMemory was executed before. |
| 1099 void PerformFreeMemory(MemoryChunk* chunk); |
| 1100 |
 | 1101 // Free is a wrapper method that calls PreFreeMemory and PerformFreeMemory |
 | 1102 // together. |
1082 void Free(MemoryChunk* chunk); | 1103 void Free(MemoryChunk* chunk); |
1083 | 1104 |
1084 // Returns the maximum available bytes of heaps. | 1105 // Returns the maximum available bytes of heaps. |
1085 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 1106 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
1086 | 1107 |
1087 // Returns allocated spaces in bytes. | 1108 // Returns allocated spaces in bytes. |
1088 intptr_t Size() { return size_; } | 1109 intptr_t Size() { return size_; } |
1089 | 1110 |
1090 // Returns the maximum available executable bytes of heaps. | 1111 // Returns the maximum available executable bytes of heaps. |
1091 intptr_t AvailableExecutable() { | 1112 intptr_t AvailableExecutable() { |
(...skipping 29 matching lines...) Expand all Loading... |
1121 Executability executable, Space* space); | 1142 Executability executable, Space* space); |
1122 | 1143 |
1123 Address ReserveAlignedMemory(size_t requested, size_t alignment, | 1144 Address ReserveAlignedMemory(size_t requested, size_t alignment, |
1124 base::VirtualMemory* controller); | 1145 base::VirtualMemory* controller); |
1125 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, | 1146 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, |
1126 size_t alignment, Executability executable, | 1147 size_t alignment, Executability executable, |
1127 base::VirtualMemory* controller); | 1148 base::VirtualMemory* controller); |
1128 | 1149 |
1129 bool CommitMemory(Address addr, size_t size, Executability executable); | 1150 bool CommitMemory(Address addr, size_t size, Executability executable); |
1130 | 1151 |
| 1152 void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation, |
| 1153 Executability executable); |
1131 void FreeMemory(base::VirtualMemory* reservation, Executability executable); | 1154 void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
1132 void FreeMemory(Address addr, size_t size, Executability executable); | 1155 void FreeMemory(Address addr, size_t size, Executability executable); |
1133 | 1156 |
1134 // Commit a contiguous block of memory from the initial chunk. Assumes that | 1157 // Commit a contiguous block of memory from the initial chunk. Assumes that |
1135 // the address is not NULL, the size is greater than zero, and that the | 1158 // the address is not NULL, the size is greater than zero, and that the |
1136 // block is contained in the initial chunk. Returns true if it succeeded | 1159 // block is contained in the initial chunk. Returns true if it succeeded |
1137 // and false otherwise. | 1160 // and false otherwise. |
1138 bool CommitBlock(Address start, size_t size, Executability executable); | 1161 bool CommitBlock(Address start, size_t size, Executability executable); |
1139 | 1162 |
1140 // Uncommit a contiguous block of memory [start..(start+size)[. | 1163 // Uncommit a contiguous block of memory [start..(start+size)[. |
(...skipping 1687 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2828 count = 0; | 2851 count = 0; |
2829 } | 2852 } |
2830 // Must be small, since an iteration is used for lookup. | 2853 // Must be small, since an iteration is used for lookup. |
2831 static const int kMaxComments = 64; | 2854 static const int kMaxComments = 64; |
2832 }; | 2855 }; |
2833 #endif | 2856 #endif |
2834 } | 2857 } |
2835 } // namespace v8::internal | 2858 } // namespace v8::internal |
2836 | 2859 |
2837 #endif // V8_HEAP_SPACES_H_ | 2860 #endif // V8_HEAP_SPACES_H_ |
OLD | NEW |