OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
(...skipping 1187 matching lines...) | |
1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and | 1198 kRegular, // Pages of kPageSize that do not live in a CodeRange and |
1199 // can thus be used for stealing. | 1199 // can thus be used for stealing. |
1200 kNonRegular, // Large chunks and executable chunks. | 1200 kNonRegular, // Large chunks and executable chunks. |
1201 kPooled, // Pooled chunks, already uncommited and ready for reuse. | 1201 kPooled, // Pooled chunks, already uncommited and ready for reuse. |
1202 kNumberOfChunkQueues, | 1202 kNumberOfChunkQueues, |
1203 }; | 1203 }; |
1204 | 1204 |
1205 template <ChunkQueueType type> | 1205 template <ChunkQueueType type> |
1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { | 1206 void AddMemoryChunkSafe(MemoryChunk* chunk) { |
1207 base::LockGuard<base::Mutex> guard(&mutex_); | 1207 base::LockGuard<base::Mutex> guard(&mutex_); |
1208 chunks_[type].push_back(chunk); | 1208 if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) { |
1209 chunks_[type].push_back(chunk); | |
1210 } else { | |
1211 DCHECK_EQ(type, kRegular); | |
1212 delayed_regular_chunks_.push_back(chunk); | |
1213 } | |
1209 } | 1214 } |
1210 | 1215 |
1211 template <ChunkQueueType type> | 1216 template <ChunkQueueType type> |
1212 MemoryChunk* GetMemoryChunkSafe() { | 1217 MemoryChunk* GetMemoryChunkSafe() { |
1213 base::LockGuard<base::Mutex> guard(&mutex_); | 1218 base::LockGuard<base::Mutex> guard(&mutex_); |
1214 if (chunks_[type].empty()) return nullptr; | 1219 if (chunks_[type].empty()) return nullptr; |
1215 MemoryChunk* chunk = chunks_[type].front(); | 1220 MemoryChunk* chunk = chunks_[type].front(); |
1216 chunks_[type].pop_front(); | 1221 chunks_[type].pop_front(); |
1217 return chunk; | 1222 return chunk; |
1218 } | 1223 } |
1219 | 1224 |
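Reviewer note on the queueing change above: AddMemoryChunkSafe now diverts regular chunks that the allocator cannot free yet onto a separate delayed list instead of the per-type queue, while GetMemoryChunkSafe keeps popping only from the per-type queues; both paths hold the same mutex. A minimal standalone sketch of that pattern, using standard-library types in place of base::Mutex/base::LockGuard; UnmapperSketch, Chunk, and the freeable flag are hypothetical names, not identifiers from this CL:

```cpp
#include <list>
#include <mutex>

struct Chunk { bool freeable = false; };  // stand-in for MemoryChunk

class UnmapperSketch {
 public:
  enum QueueType { kRegular, kNonRegular, kPooled, kNumberOfQueues };

  // Mirrors AddMemoryChunkSafe: regular chunks that cannot be freed yet are
  // parked on a delayed list and reconsidered in a later unmapping cycle.
  template <QueueType type>
  void Add(Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (type != kRegular || CanFree(chunk)) {
      queues_[type].push_back(chunk);
    } else {
      delayed_regular_.push_back(chunk);
    }
  }

  // Mirrors GetMemoryChunkSafe: pops from the per-type queue, nullptr if empty.
  template <QueueType type>
  Chunk* Get() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queues_[type].empty()) return nullptr;
    Chunk* chunk = queues_[type].front();
    queues_[type].pop_front();
    return chunk;
  }

 private:
  // Stand-in for MemoryAllocator::CanFreeMemoryChunk (hypothetical predicate).
  static bool CanFree(const Chunk* chunk) { return chunk->freeable; }

  std::mutex mutex_;
  std::list<Chunk*> queues_[kNumberOfQueues];
  std::list<Chunk*> delayed_regular_;
};
```

Usage follows the CL's intent: Add&lt;kRegular&gt;(chunk) either queues the chunk or parks it on the delayed list depending on the predicate, whereas Add&lt;kNonRegular&gt; and Add&lt;kPooled&gt; always queue.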
1225 void ReconsiderDelayedChunks(); | |
1220 void PerformFreeMemoryOnQueuedChunks(); | 1226 void PerformFreeMemoryOnQueuedChunks(); |
1221 | 1227 |
1222 base::Mutex mutex_; | 1228 base::Mutex mutex_; |
1223 MemoryAllocator* allocator_; | 1229 MemoryAllocator* allocator_; |
1224 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; | 1230 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; |
1231 // Delayed chunks cannot be processed in the current unmapping cycle because | |
1232 // of dependencies. See MemoryAllocator::CanFreeMemoryChunk. | |
Hannes Payer (out of office), 2016/08/18 09:38:42: Just spell out the "dependencies" here.
Michael Lippautz, 2016/08/18 09:59:40: Done.
1233 std::list<MemoryChunk*> delayed_regular_chunks_; | |
1225 base::Semaphore pending_unmapping_tasks_semaphore_; | 1234 base::Semaphore pending_unmapping_tasks_semaphore_; |
1226 intptr_t concurrent_unmapping_tasks_active_; | 1235 intptr_t concurrent_unmapping_tasks_active_; |
1227 | 1236 |
1228 friend class MemoryAllocator; | 1237 friend class MemoryAllocator; |
1229 }; | 1238 }; |
1230 | 1239 |
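The header only declares ReconsiderDelayedChunks(); the natural reading, given the delayed_regular_chunks_ member and the comment above it, is that it drains the delayed list and pushes each chunk back through AddMemoryChunkSafe&lt;kRegular&gt; so that chunks whose dependencies have cleared migrate to the regular queue. A hedged sketch of such a drain-and-requeue pass, written as an extra member of the hypothetical UnmapperSketch above (the real implementation lives in spaces.cc and is not shown in this excerpt):

```cpp
// Sketch only: takes the whole delayed list under the lock, then re-runs the
// CanFree check by re-adding each chunk; chunks that still cannot be freed
// simply land back on delayed_regular_.
void ReconsiderDelayed() {
  std::list<Chunk*> delayed;
  {
    std::lock_guard<std::mutex> guard(mutex_);
    delayed.swap(delayed_regular_);  // grab everything in one shot
  }
  for (Chunk* chunk : delayed) {
    Add<kRegular>(chunk);  // requeues or re-delays, depending on CanFree
  }
}
```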
1231 enum AllocationMode { | 1240 enum AllocationMode { |
1232 kRegular, | 1241 kRegular, |
1233 kPooled, | 1242 kPooled, |
1234 }; | 1243 }; |
(...skipping 18 matching lines...) | |
1253 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, | 1262 template <MemoryAllocator::AllocationMode alloc_mode = kRegular, |
1254 typename SpaceType> | 1263 typename SpaceType> |
1255 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); | 1264 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); |
1256 | 1265 |
1257 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, | 1266 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, |
1258 Executability executable); | 1267 Executability executable); |
1259 | 1268 |
1260 template <MemoryAllocator::FreeMode mode = kFull> | 1269 template <MemoryAllocator::FreeMode mode = kFull> |
1261 void Free(MemoryChunk* chunk); | 1270 void Free(MemoryChunk* chunk); |
1262 | 1271 |
1272 bool CanFreeMemoryChunk(MemoryChunk* chunk); | |
1273 | |
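For context on the allocation API touched here: AllocatePage is templated on an AllocationMode defaulting to kRegular, with kPooled reusing an already uncommitted pooled chunk, and Free is templated on a FreeMode defaulting to kFull; the new CanFreeMemoryChunk hook is what the Unmapper consults before queueing a regular chunk. A hypothetical call-site sketch; allocator, space, and page_size are placeholders, and only identifiers visible in this excerpt are relied on:

```cpp
// Hypothetical call sites, not code from this CL.
Page* fresh_page = allocator->AllocatePage<MemoryAllocator::kRegular>(
    page_size, space, NOT_EXECUTABLE);  // default mode, spelled out explicitly
Page* reused_page = allocator->AllocatePage<MemoryAllocator::kPooled>(
    page_size, space, NOT_EXECUTABLE);  // take a chunk from the pool instead

if (allocator->CanFreeMemoryChunk(fresh_page)) {
  allocator->Free<MemoryAllocator::kFull>(fresh_page);  // kFull is the default
}
```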
1263 // Returns allocated spaces in bytes. | 1274 // Returns allocated spaces in bytes. |
1264 intptr_t Size() { return size_.Value(); } | 1275 intptr_t Size() { return size_.Value(); } |
1265 | 1276 |
1266 // Returns allocated executable spaces in bytes. | 1277 // Returns allocated executable spaces in bytes. |
1267 intptr_t SizeExecutable() { return size_executable_.Value(); } | 1278 intptr_t SizeExecutable() { return size_executable_.Value(); } |
1268 | 1279 |
1269 // Returns the maximum available bytes of heaps. | 1280 // Returns the maximum available bytes of heaps. |
1270 intptr_t Available() { | 1281 intptr_t Available() { |
1271 intptr_t size = Size(); | 1282 intptr_t size = Size(); |
1272 return capacity_ < size ? 0 : capacity_ - size; | 1283 return capacity_ < size ? 0 : capacity_ - size; |
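Available() clamps at zero so that a transient state where Size() exceeds capacity_ reports no available memory rather than a negative value. The same guard in isolation, with illustrative numbers only:

```cpp
#include <cassert>
#include <cstdint>

// Same clamped subtraction as Available(); inputs are illustrative.
std::intptr_t AvailableBytes(std::intptr_t capacity, std::intptr_t size) {
  return capacity < size ? 0 : capacity - size;
}

int main() {
  assert(AvailableBytes(8 << 20, 5 << 20) == 3 << 20);  // 3 MB left
  assert(AvailableBytes(8 << 20, 9 << 20) == 0);        // clamped, not negative
}
```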
(...skipping 1756 matching lines...) | |
3029 count = 0; | 3040 count = 0; |
3030 } | 3041 } |
3031 // Must be small, since an iteration is used for lookup. | 3042 // Must be small, since an iteration is used for lookup. |
3032 static const int kMaxComments = 64; | 3043 static const int kMaxComments = 64; |
3033 }; | 3044 }; |
3034 #endif | 3045 #endif |
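The trailing fragment above (only partially visible because of the skipped range) belongs to a debug-only comment-statistics table whose capacity is capped at kMaxComments = 64 precisely because lookups iterate over the whole array. A standalone sketch of such a small, linearly scanned table; the names are illustrative, not V8's:

```cpp
#include <cstring>

// Small fixed-size statistics table; lookup is a linear scan, which is why the
// capacity must stay small (mirrors the kMaxComments = 64 remark above).
struct CommentStatsSketch {
  static const int kMaxEntries = 64;
  struct Entry {
    const char* comment = nullptr;  // nullptr marks a free slot
    int size = 0;
    int count = 0;
  } entries[kMaxEntries];

  Entry* Lookup(const char* comment) {
    for (int i = 0; i < kMaxEntries; i++) {
      Entry& e = entries[i];
      if (e.comment != nullptr && std::strcmp(e.comment, comment) == 0) return &e;
    }
    return nullptr;  // not found; the caller may claim a free slot
  }
};
```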
3035 } // namespace internal | 3046 } // namespace internal |
3036 } // namespace v8 | 3047 } // namespace v8 |
3037 | 3048 |
3038 #endif // V8_HEAP_SPACES_H_ | 3049 #endif // V8_HEAP_SPACES_H_ |