OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
(...skipping 1187 matching lines...)
1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and | 1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and |
1199 // can thus be used for stealing. | 1199 // can thus be used for stealing. |
1200 kNonRegular, // Large chunks and executable chunks. | 1200 kNonRegular, // Large chunks and executable chunks. |
1201 kPooled, // Pooled chunks, already uncommitted and ready for reuse. | 1201 kPooled, // Pooled chunks, already uncommitted and ready for reuse. |
1202 kNumberOfChunkQueues, | 1202 kNumberOfChunkQueues, |
1203 }; | 1203 }; |
1204 | 1204 |
1205 template <ChunkQueueType type> | 1205 template <ChunkQueueType type> |
1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { | 1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { |
1207 base::LockGuard<base::Mutex> guard(&mutex_); | 1207 base::LockGuard<base::Mutex> guard(&mutex_); |
1208 if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) { | 1208 chunks_[type].push_back(chunk); |
1209 chunks_[type].push_back(chunk); | |
1210 } else { | |
1211 DCHECK_EQ(type, kRegular); | |
1212 delayed_regular_chunks_.push_back(chunk); | |
1213 } | |
1214 } | 1209 } |
1215 | 1210 |
1216 template <ChunkQueueType type> | 1211 template <ChunkQueueType type> |
1217 MemoryChunk* GetMemoryChunkSafe() { | 1212 MemoryChunk* GetMemoryChunkSafe() { |
1218 base::LockGuard<base::Mutex> guard(&mutex_); | 1213 base::LockGuard<base::Mutex> guard(&mutex_); |
1219 if (chunks_[type].empty()) return nullptr; | 1214 if (chunks_[type].empty()) return nullptr; |
1220 MemoryChunk* chunk = chunks_[type].front(); | 1215 MemoryChunk* chunk = chunks_[type].front(); |
1221 chunks_[type].pop_front(); | 1216 chunks_[type].pop_front(); |
1222 return chunk; | 1217 return chunk; |
1223 } | 1218 } |
1224 | 1219 |
1225 void ReconsiderDelayedChunks(); | |
1226 void PerformFreeMemoryOnQueuedChunks(); | 1220 void PerformFreeMemoryOnQueuedChunks(); |
1227 | 1221 |
1228 base::Mutex mutex_; | 1222 base::Mutex mutex_; |
1229 MemoryAllocator* allocator_; | 1223 MemoryAllocator* allocator_; |
1230 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; | 1224 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; |
1231 // Delayed chunks cannot be processed in the current unmapping cycle because | |
1232 // of dependencies such as an active sweeper. | |
1233 // See MemoryAllocator::CanFreeMemoryChunk. | |
1234 std::list<MemoryChunk*> delayed_regular_chunks_; | |
1235 base::Semaphore pending_unmapping_tasks_semaphore_; | 1225 base::Semaphore pending_unmapping_tasks_semaphore_; |
1236 intptr_t concurrent_unmapping_tasks_active_; | 1226 intptr_t concurrent_unmapping_tasks_active_; |
1237 | 1227 |
1238 friend class MemoryAllocator; | 1228 friend class MemoryAllocator; |
1239 }; | 1229 }; |
1240 | 1230 |
1241 enum AllocationMode { | 1231 enum AllocationMode { |
1242 kRegular, | 1232 kRegular, |
1243 kPooled, | 1233 kPooled, |
1244 }; | 1234 }; |
(...skipping 18 matching lines...)
1263 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, | 1253 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, |
1264 typename SpaceType> | 1254 typename SpaceType> |
1265 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); | 1255 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); |
1266 | 1256 |
1267 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, | 1257 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, |
1268 Executability executable); | 1258 Executability executable); |
1269 | 1259 |
1270 template <MemoryAllocator::FreeMode mode = kFull> | 1260 template <MemoryAllocator::FreeMode mode = kFull> |
1271 void Free(MemoryChunk* chunk); | 1261 void Free(MemoryChunk* chunk); |
1272 | 1262 |
1273 bool CanFreeMemoryChunk(MemoryChunk* chunk); | |
1274 | |
1275 // Returns allocated spaces in bytes. | 1263 // Returns allocated spaces in bytes. |
1276 intptr_t Size() { return size_.Value(); } | 1264 intptr_t Size() { return size_.Value(); } |
1277 | 1265 |
1278 // Returns allocated executable spaces in bytes. | 1266 // Returns allocated executable spaces in bytes. |
1279 intptr_t SizeExecutable() { return size_executable_.Value(); } | 1267 intptr_t SizeExecutable() { return size_executable_.Value(); } |
1280 | 1268 |
1281 // Returns the maximum available bytes of heaps. | 1269 // Returns the maximum available bytes of heaps. |
1282 intptr_t Available() { | 1270 intptr_t Available() { |
1283 intptr_t size = Size(); | 1271 intptr_t size = Size(); |
1284 return capacity_ < size ? 0 : capacity_ - size; | 1272 return capacity_ < size ? 0 : capacity_ - size; |
(...skipping 1756 matching lines...)
3041 count = 0; | 3029 count = 0; |
3042 } | 3030 } |
3043 // Must be small, since an iteration is used for lookup. | 3031 // Must be small, since an iteration is used for lookup. |
3044 static const int kMaxComments = 64; | 3032 static const int kMaxComments = 64; |
3045 }; | 3033 }; |
3046 #endif | 3034 #endif |
3047 } // namespace internal | 3035 } // namespace internal |
3048 } // namespace v8 | 3036 } // namespace v8 |
3049 | 3037 |
3050 #endif // V8_HEAP_SPACES_H_ | 3038 #endif // V8_HEAP_SPACES_H_ |
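
For reference, a minimal standalone sketch of the pattern the Unmapper keeps after this change: one mutex-guarded std::list per queue type, selected by a compile-time enum value, with every enqueue taking the same push_back path now that the delayed_regular_chunks_ side list is gone. This is not V8 code; MemoryChunk, ChunkQueues, and the use of std::mutex/std::lock_guard here are stand-ins for the V8 types in the diff above.

// Standalone sketch (assumptions noted above), not the V8 implementation.
#include <list>
#include <mutex>

struct MemoryChunk {};  // stand-in for v8::internal::MemoryChunk

class ChunkQueues {
 public:
  enum ChunkQueueType { kRegular, kNonRegular, kPooled, kNumberOfChunkQueues };

  // Every enqueue takes the same path: lock, push_back. No special casing
  // for chunks that cannot be freed yet.
  template <ChunkQueueType type>
  void AddMemoryChunkSafe(MemoryChunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    chunks_[type].push_back(chunk);
  }

  // Pops the oldest chunk of the given type; returns nullptr when empty.
  template <ChunkQueueType type>
  MemoryChunk* GetMemoryChunkSafe() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (chunks_[type].empty()) return nullptr;
    MemoryChunk* chunk = chunks_[type].front();
    chunks_[type].pop_front();
    return chunk;
  }

 private:
  std::mutex mutex_;
  std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
};

int main() {
  ChunkQueues queues;
  MemoryChunk chunk;
  queues.AddMemoryChunkSafe<ChunkQueues::kPooled>(&chunk);
  MemoryChunk* reused = queues.GetMemoryChunkSafe<ChunkQueues::kPooled>();
  (void)reused;  // in V8 a pooled chunk would be recommitted before reuse
  return 0;
}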