OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
(...skipping 1187 matching lines...)
1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and | 1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and |
1199 // can thus be used for stealing. | 1199 // can thus be used for stealing. |
1200 kNonRegular, // Large chunks and executable chunks. | 1200 kNonRegular, // Large chunks and executable chunks. |
1201 kPooled, // Pooled chunks, already uncommitted and ready for reuse. | 1201 kPooled, // Pooled chunks, already uncommitted and ready for reuse. |
1202 kNumberOfChunkQueues, | 1202 kNumberOfChunkQueues, |
1203 }; | 1203 }; |
1204 | 1204 |
1205 template <ChunkQueueType type> | 1205 template <ChunkQueueType type> |
1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { | 1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { |
1207 base::LockGuard<base::Mutex> guard(&mutex_); | 1207 base::LockGuard<base::Mutex> guard(&mutex_); |
1208 chunks_[type].push_back(chunk); | 1208 if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) { |
| 1209 chunks_[type].push_back(chunk); |
| 1210 } else { |
| 1211 DCHECK_EQ(type, kRegular); |
| 1212 delayed_regular_chunks_.push_back(chunk); |
| 1213 } |
1209 } | 1214 } |
1210 | 1215 |
1211 template <ChunkQueueType type> | 1216 template <ChunkQueueType type> |
1212 MemoryChunk* GetMemoryChunkSafe() { | 1217 MemoryChunk* GetMemoryChunkSafe() { |
1213 base::LockGuard<base::Mutex> guard(&mutex_); | 1218 base::LockGuard<base::Mutex> guard(&mutex_); |
1214 if (chunks_[type].empty()) return nullptr; | 1219 if (chunks_[type].empty()) return nullptr; |
1215 MemoryChunk* chunk = chunks_[type].front(); | 1220 MemoryChunk* chunk = chunks_[type].front(); |
1216 chunks_[type].pop_front(); | 1221 chunks_[type].pop_front(); |
1217 return chunk; | 1222 return chunk; |
1218 } | 1223 } |
1219 | 1224 |
| 1225 void ReconsiderDelayedChunks(); |
1220 void PerformFreeMemoryOnQueuedChunks(); | 1226 void PerformFreeMemoryOnQueuedChunks(); |
1221 | 1227 |
1222 base::Mutex mutex_; | 1228 base::Mutex mutex_; |
1223 MemoryAllocator* allocator_; | 1229 MemoryAllocator* allocator_; |
1224 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; | 1230 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; |
| 1231 // Delayed chunks cannot be processed in the current unmapping cycle because |
| 1232 // of dependencies such as an active sweeper. |
| 1233 // See MemoryAllocator::CanFreeMemoryChunk. |
| 1234 std::list<MemoryChunk*> delayed_regular_chunks_; |
1225 base::Semaphore pending_unmapping_tasks_semaphore_; | 1235 base::Semaphore pending_unmapping_tasks_semaphore_; |
1226 intptr_t concurrent_unmapping_tasks_active_; | 1236 intptr_t concurrent_unmapping_tasks_active_; |
1227 | 1237 |
1228 friend class MemoryAllocator; | 1238 friend class MemoryAllocator; |
1229 }; | 1239 }; |
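Note (reviewer sketch, not part of the patch): ReconsiderDelayedChunks() is only declared in this header; its definition lives in spaces.cc and is not shown in this diff. A minimal sketch of what it presumably does, assuming only the members declared above, is:

// Illustrative sketch only; the real definition in spaces.cc may differ.
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
  // Take the currently delayed chunks and re-run the queuing decision.
  // Chunks whose blocking dependency is gone (CanFreeMemoryChunk() now
  // true) move to the regular queue; the rest are delayed again by
  // AddMemoryChunkSafe<kRegular>().
  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
  for (MemoryChunk* chunk : delayed_chunks) {
    AddMemoryChunkSafe<kRegular>(chunk);
  }
}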
1230 | 1240 |
1231 enum AllocationMode { | 1241 enum AllocationMode { |
1232 kRegular, | 1242 kRegular, |
1233 kPooled, | 1243 kPooled, |
1234 }; | 1244 }; |
(...skipping 18 matching lines...)
1253 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, | 1263 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, |
1254 typename SpaceType> | 1264 typename SpaceType> |
1255 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); | 1265 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); |
1256 | 1266 |
1257 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, | 1267 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, |
1258 Executability executable); | 1268 Executability executable); |
1259 | 1269 |
1260 template <MemoryAllocator::FreeMode mode = kFull> | 1270 template <MemoryAllocator::FreeMode mode = kFull> |
1261 void Free(MemoryChunk* chunk); | 1271 void Free(MemoryChunk* chunk); |
1262 | 1272 |
| 1273 bool CanFreeMemoryChunk(MemoryChunk* chunk); |
| 1274 |
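For context (illustrative only, not part of the CL): the allocation and free behavior is picked at compile time through the template parameters shown above. A hedged call-site sketch, where the names "allocator" and "space" and the size constant are assumptions for the example rather than code from this change:

// Illustrative sketch only; "allocator" (MemoryAllocator*) and "space"
// (the owning space) are assumed to exist at the call site.
const intptr_t kRequestedSize = 512 * 1024;  // example request in bytes
Page* page = allocator->AllocatePage<MemoryAllocator::kPooled>(
    kRequestedSize, space, NOT_EXECUTABLE);  // presumably reuses a pooled chunk
allocator->Free<MemoryAllocator::kFull>(page);  // kFull is the default FreeMode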
1263 // Returns allocated spaces in bytes. | 1275 // Returns allocated spaces in bytes. |
1264 intptr_t Size() { return size_.Value(); } | 1276 intptr_t Size() { return size_.Value(); } |
1265 | 1277 |
1266 // Returns allocated executable spaces in bytes. | 1278 // Returns allocated executable spaces in bytes. |
1267 intptr_t SizeExecutable() { return size_executable_.Value(); } | 1279 intptr_t SizeExecutable() { return size_executable_.Value(); } |
1268 | 1280 |
1269 // Returns the maximum available bytes of heaps. | 1281 // Returns the maximum available bytes of heaps. |
1270 intptr_t Available() { | 1282 intptr_t Available() { |
1271 intptr_t size = Size(); | 1283 intptr_t size = Size(); |
1272 return capacity_ < size ? 0 : capacity_ - size; | 1284 return capacity_ < size ? 0 : capacity_ - size; |
(...skipping 1756 matching lines...)
3029 count = 0; | 3041 count = 0; |
3030 } | 3042 } |
3031 // Must be small, since an iteration is used for lookup. | 3043 // Must be small, since an iteration is used for lookup. |
3032 static const int kMaxComments = 64; | 3044 static const int kMaxComments = 64; |
3033 }; | 3045 }; |
3034 #endif | 3046 #endif |
3035 } // namespace internal | 3047 } // namespace internal |
3036 } // namespace v8 | 3048 } // namespace v8 |
3037 | 3049 |
3038 #endif // V8_HEAP_SPACES_H_ | 3050 #endif // V8_HEAP_SPACES_H_ |